Nov 24 07:57:16 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 24 07:57:16 crc restorecon[4686]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:16 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 24 07:57:17 crc restorecon[4686]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 07:57:17 crc 
restorecon[4686]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 24 07:57:17 crc 
restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 
07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 24 07:57:17 crc 
restorecon[4686]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 
07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 24 07:57:17 crc restorecon[4686]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 24 07:57:17 crc restorecon[4686]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 24 07:57:18 crc kubenswrapper[4691]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 24 07:57:18 crc kubenswrapper[4691]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 24 07:57:18 crc kubenswrapper[4691]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 24 07:57:18 crc kubenswrapper[4691]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 24 07:57:18 crc kubenswrapper[4691]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 24 07:57:18 crc kubenswrapper[4691]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.482971 4691 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490723 4691 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490748 4691 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490754 4691 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490760 4691 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490766 4691 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490771 4691 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490777 4691 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490784 4691 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490790 4691 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490796 4691 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490804 4691 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490811 4691 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490817 4691 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490823 4691 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490830 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490836 4691 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490841 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490847 4691 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490853 4691 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490859 4691 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490865 4691 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490871 4691 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490877 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490884 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490890 4691 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490897 4691 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490904 4691 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490909 4691 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490914 4691 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490920 4691 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490925 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490930 4691 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490935 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490941 4691 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490948 4691 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490955 4691 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490963 4691 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490968 4691 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490974 4691 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490979 4691 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490987 4691 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490992 4691 feature_gate.go:330] unrecognized feature gate: Example
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.490998 4691 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491003 4691 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491008 4691 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491013 4691 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491018 4691 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491023 4691 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491028 4691 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491033 4691 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491038 4691 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491043 4691 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491049 4691 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491057 4691 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491063 4691 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491070 4691 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491076 4691 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491082 4691 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491088 4691 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491094 4691 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491100 4691 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491105 4691 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491110 4691 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491116 4691 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491121 4691 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491128 4691 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491135 4691 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491140 4691 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491145 4691 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491150 4691 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.491155 4691 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491264 4691 flags.go:64] FLAG: --address="0.0.0.0"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491280 4691 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491292 4691 flags.go:64] FLAG: --anonymous-auth="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491300 4691 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491308 4691 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491315 4691 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491324 4691 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491332 4691 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491338 4691 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491344 4691 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491351 4691 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491357 4691 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491363 4691 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491369 4691 flags.go:64] FLAG: --cgroup-root=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491375 4691 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491381 4691 flags.go:64] FLAG: --client-ca-file=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491388 4691 flags.go:64] FLAG: --cloud-config=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491393 4691 flags.go:64] FLAG: --cloud-provider=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491399 4691 flags.go:64] FLAG: --cluster-dns="[]"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491407 4691 flags.go:64] FLAG: --cluster-domain=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491413 4691 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491420 4691 flags.go:64] FLAG: --config-dir=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491425 4691 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491432 4691 flags.go:64] FLAG: --container-log-max-files="5"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491439 4691 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491469 4691 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491476 4691 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491482 4691 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491488 4691 flags.go:64] FLAG: --contention-profiling="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491495 4691 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491506 4691 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491513 4691 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491519 4691 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491528 4691 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491534 4691 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491540 4691 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491547 4691 flags.go:64] FLAG: --enable-load-reader="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491554 4691 flags.go:64] FLAG: --enable-server="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491560 4691 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491575 4691 flags.go:64] FLAG: --event-burst="100"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491581 4691 flags.go:64] FLAG: --event-qps="50"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491588 4691 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491594 4691 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491600 4691 flags.go:64] FLAG: --eviction-hard=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491607 4691 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491613 4691 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491626 4691 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491632 4691 flags.go:64] FLAG: --eviction-soft=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491638 4691 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491645 4691 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491651 4691 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491657 4691 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491663 4691 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491669 4691 flags.go:64] FLAG: --fail-swap-on="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491675 4691 flags.go:64] FLAG: --feature-gates=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491683 4691 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491689 4691 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491695 4691 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491702 4691 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491708 4691 flags.go:64] FLAG: --healthz-port="10248"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491714 4691 flags.go:64] FLAG: --help="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491720 4691 flags.go:64] FLAG: --hostname-override=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491726 4691 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491732 4691 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491738 4691 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491747 4691 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491752 4691 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491758 4691 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491764 4691 flags.go:64] FLAG: --image-service-endpoint=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491770 4691 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491776 4691 flags.go:64] FLAG: --kube-api-burst="100"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491783 4691 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491789 4691 flags.go:64] FLAG: --kube-api-qps="50"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491796 4691 flags.go:64] FLAG: --kube-reserved=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491803 4691 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491809 4691 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491816 4691 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491822 4691 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491830 4691 flags.go:64] FLAG: --lock-file=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491836 4691 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491842 4691 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491848 4691 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491858 4691 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491864 4691 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491870 4691 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491877 4691 flags.go:64] FLAG: --logging-format="text"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491883 4691 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491889 4691 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491895 4691 flags.go:64] FLAG: --manifest-url=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491901 4691 flags.go:64] FLAG: --manifest-url-header=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491909 4691 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491915 4691 flags.go:64] FLAG: --max-open-files="1000000"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491922 4691 flags.go:64] FLAG: --max-pods="110"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491929 4691 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491935 4691 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491941 4691 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491947 4691 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491954 4691 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491960 4691 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491966 4691 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491979 4691 flags.go:64] FLAG: --node-status-max-images="50"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491984 4691 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491991 4691 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.491997 4691 flags.go:64] FLAG: --pod-cidr=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492003 4691 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492022 4691 flags.go:64] FLAG: --pod-manifest-path=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492028 4691 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492034 4691 flags.go:64] FLAG: --pods-per-core="0"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492040 4691 flags.go:64] FLAG: --port="10250"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492047 4691 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492053 4691 flags.go:64] FLAG: --provider-id=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492059 4691 flags.go:64] FLAG: --qos-reserved=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492065 4691 flags.go:64] FLAG: --read-only-port="10255"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492071 4691 flags.go:64] FLAG: --register-node="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492078 4691 flags.go:64] FLAG: --register-schedulable="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492084 4691 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492099 4691 flags.go:64] FLAG: --registry-burst="10"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492105 4691 flags.go:64] FLAG: --registry-qps="5"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492111 4691 flags.go:64] FLAG: --reserved-cpus=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492117 4691 flags.go:64] FLAG: --reserved-memory=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492125 4691 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492131 4691 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492137 4691 flags.go:64] FLAG: --rotate-certificates="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492143 4691 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492149 4691 flags.go:64] FLAG: --runonce="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492155 4691 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492161 4691 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492168 4691 flags.go:64] FLAG: --seccomp-default="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492174 4691 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492180 4691 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492187 4691 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492194 4691 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492200 4691 flags.go:64] FLAG: --storage-driver-password="root"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492206 4691 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492212 4691 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492218 4691 flags.go:64] FLAG: --storage-driver-user="root"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492224 4691 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492230 4691 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492236 4691 flags.go:64] FLAG: --system-cgroups=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492242 4691 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492251 4691 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492257 4691 flags.go:64] FLAG: --tls-cert-file=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492263 4691 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492270 4691 flags.go:64] FLAG: --tls-min-version=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492276 4691 flags.go:64] FLAG: --tls-private-key-file=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492283 4691 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492290 4691 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492296 4691 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492302 4691 flags.go:64] FLAG: --v="2"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492310 4691 flags.go:64] FLAG: --version="false"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492317 4691 flags.go:64] FLAG: --vmodule=""
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492325 4691 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492332 4691 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492499 4691 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
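
The long run of "flags.go:64] FLAG: --name=\"value\"" records above is a dump of every flag in the parsed flag set, one record per flag, in lexical order. A hedged sketch of that pattern, again assuming spf13/pflag; printFlags and the two sample flags are invented for illustration, and real kubelet logging goes through klog rather than fmt:

    // flag_dump_sketch.go - illustrative only.
    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    // printFlags walks every registered flag and prints one line per flag.
    // VisitAll iterates in lexical order by default, which is why the dump
    // above runs from --address to --volume-stats-agg-period.
    func printFlags(fs *pflag.FlagSet) {
    	fs.VisitAll(func(f *pflag.Flag) {
    		fmt.Printf("FLAG: --%s=%q\n", f.Name, f.Value)
    	})
    }

    func main() {
    	fs := pflag.NewFlagSet("kubelet-sketch", pflag.ContinueOnError)
    	fs.String("address", "0.0.0.0", "bind address")
    	fs.Int("max-pods", 110, "maximum pods per node")
    	_ = fs.Parse(nil)
    	printFlags(fs) // prints: FLAG: --address="0.0.0.0" / FLAG: --max-pods="110"
    }

Dumping defaults as well as overrides is deliberate: it records the complete effective command-line configuration at the moment the process started, which is exactly what makes a boot log like this one auditable later.
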
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492508 4691 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492514 4691 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492521 4691 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492527 4691 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492533 4691 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492556 4691 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492564 4691 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492572 4691 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492578 4691 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492586 4691 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492591 4691 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492597 4691 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492603 4691 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492609 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492615 4691 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492621 4691 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492627 4691 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492633 4691 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492638 4691 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492643 4691 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492648 4691 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492653 4691 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492658 4691 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492664 4691 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492670 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492675 4691 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492680 4691 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492686 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492691 4691 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492697 4691 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492702 4691 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492707 4691 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492712 4691 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492717 4691 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492722 4691 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492727 4691 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492737 4691 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492743 4691 feature_gate.go:330] unrecognized feature gate: Example
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492748 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492753 4691 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492758 4691 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492764 4691 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492769 4691 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492775 4691 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492780 4691 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492785 4691 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492790 4691 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492795 4691 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492801 4691 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492806 4691 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492811 4691 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492818 4691 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492824 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492830 4691 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492836 4691 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492841 4691 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492847 4691 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492852 4691 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492858 4691 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492864 4691 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492869 4691 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492875 4691 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492880 4691 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492887 4691 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492892 4691 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492897 4691 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492903 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492908 4691 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492916 4691 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.492923 4691 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
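
The repeated "unrecognized feature gate" warnings, together with the occasional "Setting GA feature gate ..." and "Setting deprecated feature gate ..." records, suggest a gate list that names more gates than this particular binary knows: unknown names are warned about and skipped, while known gates that have gone GA or been deprecated are applied with a removal notice. A minimal sketch of that classification, assuming the usual Name=bool,Name=bool flag format; the known-gate table and message strings below are illustrative, not the real gate registry or kubelet code:

    // feature_gate_sketch.go - illustrative only.
    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    type stability int

    const (
    	alpha stability = iota
    	beta
    	ga
    	deprecated
    )

    // A tiny stand-in for the real gate registry (invented subset).
    var known = map[string]stability{
    	"CloudDualStackNodeIPs":     ga,
    	"ValidatingAdmissionPolicy": ga,
    	"KMSv1":                     deprecated,
    	"NodeSwap":                  beta,
    }

    // apply parses "Name=bool,Name=bool,..." and records the result,
    // emitting one line per entry in the same spirit as the log above.
    func apply(spec string, enabled map[string]bool) {
    	for _, kv := range strings.Split(spec, ",") {
    		name, val, ok := strings.Cut(kv, "=")
    		if !ok {
    			fmt.Printf("malformed feature gate entry: %q\n", kv)
    			continue
    		}
    		on, err := strconv.ParseBool(val)
    		if err != nil {
    			fmt.Printf("invalid value for %s: %q\n", name, val)
    			continue
    		}
    		switch st, found := known[name]; {
    		case !found:
    			fmt.Printf("unrecognized feature gate: %s\n", name)
    		case st == ga:
    			fmt.Printf("Setting GA feature gate %s=%t. It will be removed in a future release.\n", name, on)
    			enabled[name] = on
    		case st == deprecated:
    			fmt.Printf("Setting deprecated feature gate %s=%t. It will be removed in a future release.\n", name, on)
    			enabled[name] = on
    		default:
    			enabled[name] = on
    		}
    	}
    }

    func main() {
    	enabled := map[string]bool{}
    	apply("CloudDualStackNodeIPs=true,KMSv1=true,GatewayAPI=true", enabled)
    	fmt.Println("feature gates:", enabled)
    }

Under these assumptions, the run prints one GA notice, one deprecation notice, and one "unrecognized feature gate: GatewayAPI" line, then the resolved map, mirroring the feature_gate.go:386 "feature gates: {map[...]}" summary records that follow in the log.
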
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.492938 4691 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.507091 4691 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.507140 4691 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507228 4691 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507241 4691 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507247 4691 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507253 4691 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507263 4691 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507270 4691 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507277 4691 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507283 4691 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507289 4691 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507298 4691 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507306 4691 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507312 4691 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507318 4691 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507323 4691 feature_gate.go:330] unrecognized feature gate: Example
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507328 4691 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507334 4691 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507340 4691 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507346 4691 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507351 4691 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507355 4691 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507359 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507363 4691 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507368 4691 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507373 4691 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507377 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507381 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507385 4691 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507389 4691 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507393 4691 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507400 4691 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507404 4691 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507408 4691 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507411 4691 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507418 4691 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507421 4691 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507425 4691 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507429 4691 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507432 4691 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507436 4691 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507440 4691 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507466 4691 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507469 4691 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507473 4691 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507476 4691 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507480 4691 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507484 4691 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507487 4691 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507491 4691 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507494 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507498 4691 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507501 4691 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507505 4691 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507508 4691 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507511 4691 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507515 4691 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507518 4691 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507521 4691 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507525 4691 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507528 4691 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507533 4691 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507536 4691 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507541 4691 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507545 4691 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507548 4691 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507553 4691 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507557 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507560 4691 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507565 4691 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507568 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507572 4691 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507577 4691 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.507585 4691 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507751 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507759 4691 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507764 4691 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507768 4691 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507772 4691 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507776 4691 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507779 4691 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507782 4691 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507789 4691 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507792 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507796 4691 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507801 4691 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507804 4691 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507808 4691 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507812 4691 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507817 4691 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507823 4691 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507829 4691 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507833 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507838 4691 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507841 4691 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507846 4691 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507850 4691 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507854 4691 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507858 4691 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507862 4691 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507867 4691 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507872 4691 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507876 4691 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507880 4691 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507884 4691 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507887 4691 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507891 4691 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507895 4691 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507898 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507902 4691 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507906 4691 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507911 4691 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507914 4691 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507918 4691 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507922 4691 feature_gate.go:330] unrecognized feature gate: Example
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507926 4691 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507930 4691 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507934 4691 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507937 4691 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507941 4691 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507945 4691 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507948 4691 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507952 4691 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507956 4691 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507959 4691 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507963 4691 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507967 4691 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507971 4691 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507974 4691 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507978 4691 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507982 4691 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507985 4691 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507988 4691 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507992 4691 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507995 4691 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.507999 4691 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.508002 4691 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.508005 4691 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.508009 4691 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.508012 4691 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.508017 4691 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.508021 4691 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.508024 4691 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.508028 4691 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.508031 4691 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.508036 4691 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.509922 4691 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.514217 4691 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.514324 4691 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.516585 4691 server.go:997] "Starting client certificate rotation"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.516616 4691 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.516857 4691 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-17 00:05:00.591858016 +0000 UTC
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.517017 4691 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.550718 4691 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.553000 4691 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.555338 4691 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.583033 4691 log.go:25] "Validated CRI v1 runtime API"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.625967 4691 log.go:25] "Validated CRI v1 image API"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.628894 4691 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.636291 4691 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-24-07-53-08-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.636381 4691 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.664277 4691 manager.go:217] Machine: {Timestamp:2025-11-24 07:57:18.660610708 +0000 UTC m=+0.659559997 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:5253ba1a-9775-49a3-ac2c-46321419cc02 BootID:8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c Filesystems:[{Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:6e:e8:88 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:6e:e8:88 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:a6:e1:63 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:fd:ae:06 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:60:7a:74 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:1b:35:e2 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:4e:a1:0d:d8:d1:e8 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:66:29:59:66:d3:fe Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10]
Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.664716 4691 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. 
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.664984 4691 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.668855 4691 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.669142 4691 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.669194 4691 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.669578 4691 topology_manager.go:138] "Creating topology manager with none policy" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.669599 4691 container_manager_linux.go:303] "Creating device plugin manager" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.670222 4691 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.670279 4691 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.671287 4691 state_mem.go:36] "Initialized new in-memory state store" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.671436 4691 server.go:1245] "Using root directory" path="/var/lib/kubelet" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.675614 4691 kubelet.go:418] "Attempting to sync node with API server" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.675649 4691 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" 
Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.675687 4691 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.675708 4691 kubelet.go:324] "Adding apiserver pod source" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.675728 4691 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.681612 4691 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.683343 4691 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.683401 4691 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.683567 4691 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError" Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.683552 4691 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.683661 4691 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.684631 4691 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686743 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686775 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686787 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686797 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686813 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686824 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686833 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686848 4691 plugins.go:603] "Loaded volume plugin" 
pluginName="kubernetes.io/downward-api" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686859 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686869 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686904 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.686915 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.688393 4691 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.689075 4691 server.go:1280] "Started kubelet" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.690614 4691 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.690616 4691 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.690629 4691 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Nov 24 07:57:18 crc systemd[1]: Started Kubernetes Kubelet. Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.691602 4691 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.692862 4691 server.go:460] "Adding debug handlers to kubelet server" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.694224 4691 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.694302 4691 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.694526 4691 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 08:14:29.621897563 +0000 UTC Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.694637 4691 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 120h17m10.927264963s for next certificate rotation Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.694701 4691 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.694679 4691 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.694676 4691 volume_manager.go:287] "The desired_state_of_world populator starts" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.694743 4691 volume_manager.go:289] "Starting Kubelet Volume Manager" Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.699784 4691 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.213:6443: connect: connection refused" interval="200ms" Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.699880 4691 reflector.go:561] 
k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.700018 4691 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.701640 4691 factory.go:153] Registering CRI-O factory Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.701684 4691 factory.go:221] Registration of the crio container factory successfully Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.701831 4691 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.701922 4691 factory.go:55] Registering systemd factory Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.701969 4691 factory.go:221] Registration of the systemd container factory successfully Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.702012 4691 factory.go:103] Registering Raw factory Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.702042 4691 manager.go:1196] Started watching for new ooms in manager Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.705914 4691 manager.go:319] Starting recovery of all containers Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.705586 4691 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.213:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187ae25053230bed default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-24 07:57:18.689037293 +0000 UTC m=+0.687986552,LastTimestamp:2025-11-24 07:57:18.689037293 +0000 UTC m=+0.687986552,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718152 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718228 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718253 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" 
volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718277 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718298 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718321 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718344 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718366 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718391 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718415 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718435 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718497 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718550 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718574 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" 
volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718593 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718615 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718639 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718659 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718678 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718701 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718721 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718741 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718761 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718782 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718801 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718876 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718900 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718922 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718942 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718964 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.718983 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719007 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719028 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719046 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719066 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719085 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" 
volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719104 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719123 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719142 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719162 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719182 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719203 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719221 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719241 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719259 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719279 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719299 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719322 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719341 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719359 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719378 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719439 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719508 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719538 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719559 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719580 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719601 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719621 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719644 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719663 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719683 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719707 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719726 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719747 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719767 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719786 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719804 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719865 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719887 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719908 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719929 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719949 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719970 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.719989 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720008 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720027 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720046 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720065 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720085 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720105 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" 
volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720126 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720148 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720170 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720189 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720208 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720225 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720245 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720264 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720283 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720303 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720323 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720341 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720360 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720378 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720399 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720422 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720440 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720499 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720520 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720541 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720561 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720580 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720600 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720620 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720700 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720726 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720750 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720771 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720795 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720816 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720837 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720862 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720883 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" 
volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720905 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720925 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720944 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720965 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.720984 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721002 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721021 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721040 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721059 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721077 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721096 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721126 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721144 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721170 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721191 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721211 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721229 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721250 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721268 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721289 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721309 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721327 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" 
volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721347 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721372 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721392 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721411 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721432 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721489 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721527 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721547 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721571 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721590 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721612 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721631 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721650 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721670 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721690 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721711 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721729 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721748 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721766 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721785 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721803 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721822 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721842 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721862 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721888 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721907 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721926 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721944 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.721998 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.722018 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.722036 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.722057 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.722076 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.722095 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.725793 4691 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.725920 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.725984 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726018 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726046 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726104 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726143 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726162 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726206 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726233 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726269 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726315 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726327 4691 manager.go:324] Recovery completed Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726333 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726376 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726394 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726414 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726431 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726463 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726480 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726496 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: 
I1124 07:57:18.726512 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726533 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726551 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726567 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726585 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726602 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726618 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726636 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726653 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726669 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726728 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726748 4691 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.726765 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.727705 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.727745 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.727765 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.727811 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.727827 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.727846 4691 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.727862 4691 reconstruct.go:97] "Volume reconstruction finished" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.727873 4691 reconciler.go:26] "Reconciler: start to sync state" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.744021 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.745735 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.745782 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.745800 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.746650 4691 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 24 07:57:18 crc kubenswrapper[4691]: 
I1124 07:57:18.746670 4691 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.746691 4691 state_mem.go:36] "Initialized new in-memory state store" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.757105 4691 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.759197 4691 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.759258 4691 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.759293 4691 kubelet.go:2335] "Starting kubelet main sync loop" Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.759503 4691 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 24 07:57:18 crc kubenswrapper[4691]: W1124 07:57:18.763009 4691 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.763111 4691 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.766421 4691 policy_none.go:49] "None policy: Start" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.767907 4691 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.767942 4691 state_mem.go:35] "Initializing new in-memory state store" Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.795223 4691 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.831495 4691 manager.go:334] "Starting Device Plugin manager" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.832337 4691 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.832357 4691 server.go:79] "Starting device plugin registration server" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.832925 4691 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.832963 4691 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.833143 4691 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.833351 4691 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.833367 4691 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.842669 4691 eviction_manager.go:285] "Eviction manager: failed to get 
summary stats" err="failed to get node info: node \"crc\" not found" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.860439 4691 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.860660 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.862855 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.862917 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.862934 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.863180 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.864226 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.864254 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.864265 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.864945 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.865027 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.865051 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.865091 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.864971 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866046 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866078 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866093 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866120 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866139 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866148 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866247 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866290 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866316 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866328 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866399 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.866477 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.867288 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.867351 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.867363 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.867561 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.867633 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.867671 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.867677 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.867708 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.867685 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.868857 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.868896 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.868911 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.868924 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.868975 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.868991 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.869221 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.869256 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.870022 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.870052 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.870064 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.900638 4691 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.213:6443: connect: connection refused" interval="400ms" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930418 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930489 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930517 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930552 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930602 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930633 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930663 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930691 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930807 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930851 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930879 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930900 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930928 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930950 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.930967 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.933574 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.935004 4691 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.935055 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.935070 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:18 crc kubenswrapper[4691]: I1124 07:57:18.935106 4691 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 07:57:18 crc kubenswrapper[4691]: E1124 07:57:18.935793 4691 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.213:6443: connect: connection refused" node="crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032169 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032247 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032288 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032321 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032352 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032382 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032413 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032403 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" 
(UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032499 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032551 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032565 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032588 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032566 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032543 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032499 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032467 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032658 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032677 4691 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032695 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032709 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032713 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032746 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032752 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032726 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032785 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032830 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032868 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032908 4691 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032911 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.032936 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.136353 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.141887 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.141967 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.141987 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.142031 4691 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 07:57:19 crc kubenswrapper[4691]: E1124 07:57:19.142767 4691 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.213:6443: connect: connection refused" node="crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.197569 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.203014 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.220107 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.240238 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: W1124 07:57:19.244922 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-ccf07fe26837a7690ec68fdcf41f97259c49ca8e505fa1a60425946b92bb1e82 WatchSource:0}: Error finding container ccf07fe26837a7690ec68fdcf41f97259c49ca8e505fa1a60425946b92bb1e82: Status 404 returned error can't find the container with id ccf07fe26837a7690ec68fdcf41f97259c49ca8e505fa1a60425946b92bb1e82 Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.247057 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 07:57:19 crc kubenswrapper[4691]: W1124 07:57:19.247420 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-8fb96777c33474ae6c811cadfbfef8302fe066c61246b18ae75bafc873a79162 WatchSource:0}: Error finding container 8fb96777c33474ae6c811cadfbfef8302fe066c61246b18ae75bafc873a79162: Status 404 returned error can't find the container with id 8fb96777c33474ae6c811cadfbfef8302fe066c61246b18ae75bafc873a79162 Nov 24 07:57:19 crc kubenswrapper[4691]: W1124 07:57:19.251286 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-7be0b19480d27e7f3a4d8c3ef76d0b57c30d7e7e9ff30a59de38b67987c7f448 WatchSource:0}: Error finding container 7be0b19480d27e7f3a4d8c3ef76d0b57c30d7e7e9ff30a59de38b67987c7f448: Status 404 returned error can't find the container with id 7be0b19480d27e7f3a4d8c3ef76d0b57c30d7e7e9ff30a59de38b67987c7f448 Nov 24 07:57:19 crc kubenswrapper[4691]: W1124 07:57:19.255260 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-dcf0fe30f2e827cbf376b4f5e7d62f73ca3c632a5d3b07464caf595c4ac0e4d2 WatchSource:0}: Error finding container dcf0fe30f2e827cbf376b4f5e7d62f73ca3c632a5d3b07464caf595c4ac0e4d2: Status 404 returned error can't find the container with id dcf0fe30f2e827cbf376b4f5e7d62f73ca3c632a5d3b07464caf595c4ac0e4d2 Nov 24 07:57:19 crc kubenswrapper[4691]: W1124 07:57:19.260638 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-7c4c6a5b6fd05e9414ef531458993f99f6523bcbc12d3126acbee39f3b765e78 WatchSource:0}: Error finding container 7c4c6a5b6fd05e9414ef531458993f99f6523bcbc12d3126acbee39f3b765e78: Status 404 returned error can't find the container with id 7c4c6a5b6fd05e9414ef531458993f99f6523bcbc12d3126acbee39f3b765e78 Nov 24 07:57:19 crc kubenswrapper[4691]: E1124 07:57:19.301750 4691 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.213:6443: connect: connection refused" interval="800ms" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.543046 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.545109 4691 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.545158 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.545171 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.545204 4691 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 07:57:19 crc kubenswrapper[4691]: E1124 07:57:19.545832 4691 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.213:6443: connect: connection refused" node="crc" Nov 24 07:57:19 crc kubenswrapper[4691]: W1124 07:57:19.573746 4691 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:19 crc kubenswrapper[4691]: E1124 07:57:19.573880 4691 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError" Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.691557 4691 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.764886 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7c4c6a5b6fd05e9414ef531458993f99f6523bcbc12d3126acbee39f3b765e78"} Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.766103 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"dcf0fe30f2e827cbf376b4f5e7d62f73ca3c632a5d3b07464caf595c4ac0e4d2"} Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.767404 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7be0b19480d27e7f3a4d8c3ef76d0b57c30d7e7e9ff30a59de38b67987c7f448"} Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.768406 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"8fb96777c33474ae6c811cadfbfef8302fe066c61246b18ae75bafc873a79162"} Nov 24 07:57:19 crc kubenswrapper[4691]: I1124 07:57:19.769440 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"ccf07fe26837a7690ec68fdcf41f97259c49ca8e505fa1a60425946b92bb1e82"} Nov 24 07:57:19 crc kubenswrapper[4691]: 
W1124 07:57:19.870305 4691 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:19 crc kubenswrapper[4691]: E1124 07:57:19.870408 4691 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError" Nov 24 07:57:20 crc kubenswrapper[4691]: W1124 07:57:20.054684 4691 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:20 crc kubenswrapper[4691]: E1124 07:57:20.055247 4691 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError" Nov 24 07:57:20 crc kubenswrapper[4691]: E1124 07:57:20.103414 4691 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.213:6443: connect: connection refused" interval="1.6s" Nov 24 07:57:20 crc kubenswrapper[4691]: W1124 07:57:20.208535 4691 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:20 crc kubenswrapper[4691]: E1124 07:57:20.208675 4691 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.346558 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.348384 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.348429 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.348472 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.348501 4691 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 07:57:20 crc kubenswrapper[4691]: E1124 07:57:20.349142 4691 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.213:6443: connect: connection refused" node="crc" Nov 24 
07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.669648 4691 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 24 07:57:20 crc kubenswrapper[4691]: E1124 07:57:20.670962 4691 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.692289 4691 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.775428 4691 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8" exitCode=0 Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.775601 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.775630 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8"} Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.776988 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.777044 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.777068 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.779603 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6"} Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.779631 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.779637 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337"} Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.779765 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932"} Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.779781 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6"} Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.780912 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.780940 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.780953 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.782033 4691 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102" exitCode=0 Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.782261 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.782263 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102"} Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.783540 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.783622 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.783648 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.784360 4691 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54" exitCode=0 Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.784478 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54"} Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.784510 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.785851 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.785938 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.785976 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.786668 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.788198 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.788255 4691 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.788279 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.788287 4691 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="2040089a4ee1b2fc3f3f46bf7840d8d71b563a282616da13b2b3cf8e3007f931" exitCode=0 Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.788328 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"2040089a4ee1b2fc3f3f46bf7840d8d71b563a282616da13b2b3cf8e3007f931"} Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.788375 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.790016 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.790093 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:20 crc kubenswrapper[4691]: I1124 07:57:20.790114 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:21 crc kubenswrapper[4691]: W1124 07:57:21.654520 4691 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:21 crc kubenswrapper[4691]: E1124 07:57:21.654610 4691 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.692177 4691 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:21 crc kubenswrapper[4691]: E1124 07:57:21.704564 4691 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.213:6443: connect: connection refused" interval="3.2s" Nov 24 07:57:21 crc kubenswrapper[4691]: W1124 07:57:21.715622 4691 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.213:6443: connect: connection refused Nov 24 07:57:21 crc kubenswrapper[4691]: E1124 07:57:21.715763 4691 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get 
\"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.213:6443: connect: connection refused" logger="UnhandledError" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.797378 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c"} Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.797495 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d"} Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.797493 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.797512 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c"} Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.799132 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.799184 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.799200 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.804610 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839"} Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.804679 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a"} Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.804694 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e"} Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.804730 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d"} Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.808672 4691 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3" exitCode=0 Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.808811 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3"} Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.808840 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.810238 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.810276 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.810287 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.811967 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.812105 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.812366 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"952741e34a2f95082426fc2d094c5f68671d32a477997392462ca0c54c0686d6"} Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.812704 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.812725 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.812736 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.813358 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.813420 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.813434 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.950260 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.954269 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.954356 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.954367 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:21 crc kubenswrapper[4691]: I1124 07:57:21.954397 4691 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 07:57:21 crc kubenswrapper[4691]: E1124 07:57:21.955087 4691 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 
38.102.83.213:6443: connect: connection refused" node="crc" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.819085 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5"} Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.819204 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.820276 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.820312 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.820325 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.823716 4691 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb" exitCode=0 Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.823774 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb"} Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.823837 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.823859 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.823888 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.823890 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.824968 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.825013 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.825039 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.825219 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.825310 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.825328 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.825360 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.825378 4691 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:22 crc kubenswrapper[4691]: I1124 07:57:22.825388 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.047859 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.048154 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.049760 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.049838 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.049860 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.056582 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.447148 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.480494 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.831438 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.831494 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.831544 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.831427 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1"} Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.831603 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd"} Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.831624 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.831638 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec"} Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.831647 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8"} Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.832618 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.832661 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.832677 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.833080 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.833121 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.833136 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.833767 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.833795 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:23 crc kubenswrapper[4691]: I1124 07:57:23.833809 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:24 crc kubenswrapper[4691]: I1124 07:57:24.841813 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e"} Nov 24 07:57:24 crc kubenswrapper[4691]: I1124 07:57:24.841919 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:24 crc kubenswrapper[4691]: I1124 07:57:24.841983 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:24 crc kubenswrapper[4691]: I1124 07:57:24.843363 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:24 crc kubenswrapper[4691]: I1124 07:57:24.843474 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:24 crc kubenswrapper[4691]: I1124 07:57:24.843510 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:24 crc kubenswrapper[4691]: I1124 07:57:24.843579 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:24 crc kubenswrapper[4691]: I1124 07:57:24.843614 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:24 crc kubenswrapper[4691]: I1124 07:57:24.843651 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:25 crc kubenswrapper[4691]: I1124 07:57:25.003131 4691 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 24 07:57:25 crc 
kubenswrapper[4691]: I1124 07:57:25.155763 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:25 crc kubenswrapper[4691]: I1124 07:57:25.158990 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:25 crc kubenswrapper[4691]: I1124 07:57:25.159038 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:25 crc kubenswrapper[4691]: I1124 07:57:25.159055 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:25 crc kubenswrapper[4691]: I1124 07:57:25.159090 4691 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 07:57:25 crc kubenswrapper[4691]: I1124 07:57:25.845246 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:25 crc kubenswrapper[4691]: I1124 07:57:25.846365 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:25 crc kubenswrapper[4691]: I1124 07:57:25.846403 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:25 crc kubenswrapper[4691]: I1124 07:57:25.846412 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:26 crc kubenswrapper[4691]: I1124 07:57:26.392769 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:26 crc kubenswrapper[4691]: I1124 07:57:26.393024 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:26 crc kubenswrapper[4691]: I1124 07:57:26.398340 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:26 crc kubenswrapper[4691]: I1124 07:57:26.398406 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:26 crc kubenswrapper[4691]: I1124 07:57:26.398421 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:26 crc kubenswrapper[4691]: I1124 07:57:26.832398 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:26 crc kubenswrapper[4691]: I1124 07:57:26.849738 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:26 crc kubenswrapper[4691]: I1124 07:57:26.853860 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:26 crc kubenswrapper[4691]: I1124 07:57:26.853967 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:26 crc kubenswrapper[4691]: I1124 07:57:26.853985 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.179993 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.202025 4691 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.202299 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.203788 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.203829 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.203840 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.472158 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.472435 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.473811 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.473847 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.473858 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.852486 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.853828 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.853890 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:27 crc kubenswrapper[4691]: I1124 07:57:27.853905 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:28 crc kubenswrapper[4691]: E1124 07:57:28.842767 4691 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 24 07:57:30 crc kubenswrapper[4691]: I1124 07:57:30.181226 4691 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 24 07:57:30 crc kubenswrapper[4691]: I1124 07:57:30.181364 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 07:57:32 crc kubenswrapper[4691]: I1124 07:57:32.449137 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 24 07:57:32 crc kubenswrapper[4691]: I1124 07:57:32.449320 4691 kubelet_node_status.go:401] 
"Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:32 crc kubenswrapper[4691]: I1124 07:57:32.450746 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:32 crc kubenswrapper[4691]: I1124 07:57:32.450793 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:32 crc kubenswrapper[4691]: I1124 07:57:32.450805 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:32 crc kubenswrapper[4691]: W1124 07:57:32.645123 4691 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 24 07:57:32 crc kubenswrapper[4691]: I1124 07:57:32.645252 4691 trace.go:236] Trace[752432631]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 07:57:22.643) (total time: 10001ms): Nov 24 07:57:32 crc kubenswrapper[4691]: Trace[752432631]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (07:57:32.645) Nov 24 07:57:32 crc kubenswrapper[4691]: Trace[752432631]: [10.001893218s] [10.001893218s] END Nov 24 07:57:32 crc kubenswrapper[4691]: E1124 07:57:32.645278 4691 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 24 07:57:32 crc kubenswrapper[4691]: I1124 07:57:32.716014 4691 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 24 07:57:32 crc kubenswrapper[4691]: I1124 07:57:32.716135 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 24 07:57:32 crc kubenswrapper[4691]: I1124 07:57:32.721243 4691 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 24 07:57:32 crc kubenswrapper[4691]: I1124 07:57:32.721349 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 24 07:57:33 crc kubenswrapper[4691]: I1124 07:57:33.487822 4691 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver 
namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]log ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]etcd ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/openshift.io-api-request-count-filter ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/openshift.io-startkubeinformers ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/generic-apiserver-start-informers ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/priority-and-fairness-config-consumer ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/priority-and-fairness-filter ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/start-apiextensions-informers ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/start-apiextensions-controllers ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/crd-informer-synced ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/start-system-namespaces-controller ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/start-cluster-authentication-info-controller ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/start-legacy-token-tracking-controller ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/start-service-ip-repair-controllers ok Nov 24 07:57:33 crc kubenswrapper[4691]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Nov 24 07:57:33 crc kubenswrapper[4691]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/priority-and-fairness-config-producer ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/bootstrap-controller ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/start-kube-aggregator-informers ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/apiservice-status-local-available-controller ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/apiservice-status-remote-available-controller ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/apiservice-registration-controller ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/apiservice-wait-for-first-sync ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/apiservice-discovery-controller ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/kube-apiserver-autoregistration ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]autoregister-completion ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/apiservice-openapi-controller ok Nov 24 07:57:33 crc kubenswrapper[4691]: [+]poststarthook/apiservice-openapiv3-controller ok Nov 24 
07:57:33 crc kubenswrapper[4691]: livez check failed Nov 24 07:57:33 crc kubenswrapper[4691]: I1124 07:57:33.487897 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.399434 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.400444 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.401906 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.401939 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.401951 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.474584 4691 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.687119 4691 apiserver.go:52] "Watching apiserver" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.693735 4691 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.694159 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"] Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.694762 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.694869 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.694989 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:36 crc kubenswrapper[4691]: E1124 07:57:36.695065 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.695109 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 07:57:36 crc kubenswrapper[4691]: E1124 07:57:36.695256 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.695713 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:36 crc kubenswrapper[4691]: E1124 07:57:36.695809 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.695883 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.697965 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.698432 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.698674 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.698807 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.698952 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.699153 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.699376 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.699422 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.700127 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.734804 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.757698 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.772130 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.784343 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.795587 4691 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.798786 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.808644 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:36 crc kubenswrapper[4691]: I1124 07:57:36.820234 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.694089 4691 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.697768 4691 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.698710 4691 trace.go:236] Trace[842944958]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 07:57:22.788) (total time: 14909ms):
Nov 24 07:57:37 crc kubenswrapper[4691]: Trace[842944958]: ---"Objects listed" error: 14909ms (07:57:37.698)
Nov 24 07:57:37 crc kubenswrapper[4691]: Trace[842944958]: [14.909963441s] [14.909963441s] END
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.698735 4691 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.699023 4691 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.699915 4691 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.700916 4691 trace.go:236] Trace[1958410513]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (24-Nov-2025 07:57:26.071) (total time: 11629ms):
Nov 24 07:57:37 crc kubenswrapper[4691]: Trace[1958410513]: ---"Objects listed" error: 11629ms (07:57:37.700)
Nov 24 07:57:37 crc kubenswrapper[4691]: Trace[1958410513]: [11.629618608s] [11.629618608s] END
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.700940 4691 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.720075 4691 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.728118 4691 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:47866->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.728168 4691 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:47852->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.728200 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:47866->192.168.126.11:17697: read: connection reset by peer"
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.728240 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:47852->192.168.126.11:17697: read: connection reset by peer"
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.759492 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.759631 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798275 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798319 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798354 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798371 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798388 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod 
\"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798407 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798440 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798558 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798578 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798595 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798611 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798643 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798662 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798678 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798747 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: 
\"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798763 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798822 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798869 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798884 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798898 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798913 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798944 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798961 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.798977 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799031 4691 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799052 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799022 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799054 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799073 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799139 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799195 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799264 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799322 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799385 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799433 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799564 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799602 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799634 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799669 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799700 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: 
\"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799737 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799774 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799810 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799841 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799875 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799979 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800012 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800056 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800101 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800155 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800206 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800355 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800417 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800496 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800588 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800622 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800661 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800695 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800730 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800763 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: 
\"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800793 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800826 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800860 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800895 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800926 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800960 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800996 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801036 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801086 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801131 4691 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801181 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799154 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799715 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799828 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801224 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.799972 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801228 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800313 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.800830 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801280 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801191 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801363 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801355 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801416 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801486 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801511 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801473 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.801636 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 07:57:38.30160489 +0000 UTC m=+20.300554179 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801664 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801696 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801707 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801765 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801831 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801887 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801936 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801973 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802012 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802048 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802086 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802126 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802166 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802204 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802242 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802280 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802329 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802366 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802401 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802434 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802505 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802542 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802588 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802624 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802659 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802693 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802727 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802765 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802805 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802856 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802905 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802939 4691 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802978 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803016 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803051 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803087 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803123 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803161 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803204 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803242 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803276 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 
07:57:37.803313 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803347 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803382 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803438 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803543 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803583 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803619 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803654 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803688 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803724 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803759 4691 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803793 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803829 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803882 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803929 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803967 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804557 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804596 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804634 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804671 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 24 07:57:37 crc kubenswrapper[4691]: 
I1124 07:57:37.804707 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804745 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804781 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804816 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804858 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804902 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804955 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805010 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805614 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805662 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 24 07:57:37 
crc kubenswrapper[4691]: I1124 07:57:37.805705 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805741 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805778 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805814 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805855 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805892 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805934 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805990 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806031 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806069 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 24 
07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806109 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806146 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806184 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806225 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806260 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806302 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806345 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806382 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806422 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806490 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 
24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806529 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806566 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806605 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801836 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.801974 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.802803 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803039 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803331 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807028 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807077 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807121 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807158 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807192 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807230 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807579 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807631 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807668 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807709 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: 
\"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807747 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807784 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807822 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807858 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.807894 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808169 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808221 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808268 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808294 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808311 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808382 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808484 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808516 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808546 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808574 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808515 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808602 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808599 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808688 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808915 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808958 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.808998 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809042 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809052 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809103 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809143 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809217 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809266 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809310 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809353 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809398 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809440 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: 
\"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809510 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809546 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809683 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809723 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809766 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809806 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809845 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809886 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " 
pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809980 4691 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810006 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810035 4691 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810067 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810093 4691 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810114 4691 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810133 4691 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810340 4691 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810374 4691 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810407 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810502 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810525 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811216 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811244 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811265 4691 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811288 4691 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811307 4691 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811328 4691 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811349 4691 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811371 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811395 4691 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811418 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811439 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811560 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811583 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809026 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809104 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803962 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804075 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804131 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804269 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804362 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.804831 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805062 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805741 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.805846 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806067 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806080 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806129 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806341 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806357 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806366 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.806372 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809222 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809243 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809384 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.803682 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.809920 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810055 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810437 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810407 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810731 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.810849 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811311 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811322 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811489 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811403 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811838 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.811925 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.812179 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.812241 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.812260 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.812406 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.812497 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.813308 4691 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.813410 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:38.313384748 +0000 UTC m=+20.312334107 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.813581 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.813735 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.813602 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.814208 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.814366 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.814681 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.814706 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.814745 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.814760 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.815022 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.815223 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.815221 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.815600 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.815645 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.815868 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.815955 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.816025 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.816077 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.816199 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). 
InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.816286 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.816267 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.816879 4691 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.816998 4691 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.817084 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.817287 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.817324 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.817528 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.817721 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.817863 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.817985 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:38.317914635 +0000 UTC m=+20.316863924 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.818521 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.818595 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.818743 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.818795 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.819068 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.818940 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.819853 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.819886 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.819919 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.819945 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.820011 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.820202 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.820309 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.820490 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.820753 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.827681 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.828090 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.828473 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.828756 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.828722 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.828809 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.828958 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.829285 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.829596 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.829784 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.830035 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.830077 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.830229 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). 
InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.830458 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.830544 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.831258 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.831495 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.833252 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.833472 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.833962 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.834222 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.834545 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.834648 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.834857 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.835121 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.835221 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.835773 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.835817 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.836167 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.836185 4691 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.836268 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:38.336248087 +0000 UTC m=+20.335197346 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.836127 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.834948 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.835276 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.835941 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.836629 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.836747 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.836862 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.836872 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.836923 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.836959 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.837070 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.837096 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.837118 4691 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.837161 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: E1124 07:57:37.837215 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:38.337188963 +0000 UTC m=+20.336138252 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.837111 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.837252 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.837936 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.838456 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.838585 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.838626 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.838806 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.839000 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.839010 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.839081 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.839105 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). 
InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.839336 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.839680 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.839674 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.839852 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.842425 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.842495 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.842622 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.842806 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.843033 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.843073 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.843180 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.843419 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.843674 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.843706 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.843799 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.844054 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.844080 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.844307 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.844237 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.846376 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.847207 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.847701 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.847746 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.847918 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.848267 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.848537 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.848673 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.849179 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.849268 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.849284 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.849568 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.849580 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.849729 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.850720 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.851221 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.851657 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.852698 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.853738 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.853615 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.854098 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.856765 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.860719 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.864029 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.880942 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.883639 4691 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5" exitCode=255 Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.883673 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5"} Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.888084 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.890930 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.895411 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.904472 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.911237 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912007 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912026 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912067 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912103 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912139 4691 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912155 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912168 4691 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912180 4691 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912192 4691 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912204 4691 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912215 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912227 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912240 4691 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912253 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912265 4691 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912278 4691 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912290 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912296 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912302 4691 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912343 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912356 4691 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912365 4691 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912373 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912382 4691 reconciler_common.go:293] 
"Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912390 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912399 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912407 4691 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912415 4691 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912424 4691 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912432 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912442 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912473 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912483 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912495 4691 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912503 4691 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912513 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912523 4691 reconciler_common.go:293] "Volume detached for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912531 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912542 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912551 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912560 4691 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912568 4691 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912576 4691 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912584 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912593 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912602 4691 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912611 4691 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912619 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912628 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912636 4691 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912645 4691 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912652 4691 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912661 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912670 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912678 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912687 4691 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912694 4691 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912703 4691 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912710 4691 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912719 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912729 4691 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912737 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912745 4691 reconciler_common.go:293] "Volume detached for volume 
\"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912753 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912761 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912769 4691 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912777 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912786 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912794 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912802 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912811 4691 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912819 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912827 4691 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912838 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912847 4691 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912858 4691 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912865 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912874 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912882 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912890 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912898 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912945 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912952 4691 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912961 4691 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912969 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912978 4691 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912986 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.912995 4691 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913004 4691 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913012 4691 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913023 4691 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913031 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913039 4691 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913047 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913055 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913063 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913072 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913080 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913088 4691 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913096 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913104 4691 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913112 4691 
reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913120 4691 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913128 4691 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913135 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913143 4691 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913151 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913159 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913169 4691 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913177 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913185 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913193 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913203 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913211 4691 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc 
kubenswrapper[4691]: I1124 07:57:37.913219 4691 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913228 4691 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913236 4691 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913244 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913252 4691 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913260 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913268 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913283 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913292 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913300 4691 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913307 4691 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913315 4691 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913323 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 
07:57:37.913331 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913356 4691 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913364 4691 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913372 4691 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913405 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913415 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913423 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913433 4691 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913441 4691 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913467 4691 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913476 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913485 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913493 4691 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913500 4691 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913508 4691 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913517 4691 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913525 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913533 4691 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913542 4691 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913550 4691 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913558 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913566 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913574 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913582 4691 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913590 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913599 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913607 4691 reconciler_common.go:293] "Volume detached for 
volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913617 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913626 4691 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913633 4691 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913642 4691 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913650 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913658 4691 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913666 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913674 4691 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913682 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913690 4691 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913697 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913705 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913713 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913720 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913728 4691 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913736 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913744 4691 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913752 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913761 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.913768 4691 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.915912 4691 scope.go:117] "RemoveContainer" containerID="9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.916664 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.919371 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.922319 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.929401 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.932025 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:37 crc kubenswrapper[4691]: I1124 07:57:37.941738 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:37 crc kubenswrapper[4691]: W1124 07:57:37.955022 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-dcdb2553d646077628c33d29ae20f98ecaacc5d726f29d8edb6d073767661b56 WatchSource:0}: Error finding container dcdb2553d646077628c33d29ae20f98ecaacc5d726f29d8edb6d073767661b56: Status 404 returned error can't find the container with id dcdb2553d646077628c33d29ae20f98ecaacc5d726f29d8edb6d073767661b56 Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.316066 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.316128 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.316224 4691 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.316248 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 07:57:39.316223196 +0000 UTC m=+21.315172465 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.316277 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:39.316265577 +0000 UTC m=+21.315214846 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.417578 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.417658 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.417708 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.417802 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.417829 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.417866 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.417876 4691 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.417938 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:39.417923575 +0000 UTC m=+21.416872824 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.417837 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.418217 4691 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.417806 4691 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.418295 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:39.418274024 +0000 UTC m=+21.417223273 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.418319 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:39.418309965 +0000 UTC m=+21.417259324 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.487616 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.499864 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.508337 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.517762 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.531115 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24
T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.545084 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.556807 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.573531 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.584306 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.588703 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.593356 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.596566 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.615648 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.629404 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.646056 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24
T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.661789 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.677280 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.693776 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.709911 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"r
unning\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.724580 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.743179 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.757986 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.760211 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.760219 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.760372 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:38 crc kubenswrapper[4691]: E1124 07:57:38.760544 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.764874 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.765532 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.767167 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.767796 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.768865 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.770241 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.770941 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.772124 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.772828 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.773904 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.774597 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.776819 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.777501 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.778335 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.779748 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.780867 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.783124 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.783835 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.785054 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.785108 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24
T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.786501 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.787103 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.788751 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.790206 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 
07:57:38.791464 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.792041 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.792812 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.794376 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.795032 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.797056 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.798550 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.800667 4691 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.800798 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.800763 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.805337 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.805943 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.806698 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.807845 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.808573 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.809094 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.809818 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.812844 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.813473 4691 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.814404 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.815250 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.816178 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.816881 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.817584 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.818166 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.818976 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.819509 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.820000 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.821839 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.823108 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.824116 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.824873 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.825324 4691 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.852710 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.869391 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24
T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.886431 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.891468 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a"} Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.891540 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"c07a1bf3c18dc8198cc6be5f6a5b8575f852e2f616ea8e8bcececf76fb89d517"} Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.893705 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be"} Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.893746 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f"} Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.893761 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"b9675096a4b9b41a27bfd5a0c52c1d5dd2dbbeb10e0d44c6d6a507885b4f7337"} Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.898683 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.900522 4691 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.902171 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484"} Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.902594 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.903948 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"dcdb2553d646077628c33d29ae20f98ecaacc5d726f29d8edb6d073767661b56"} Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.909467 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.914888 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.930717 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":tr
ue,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.948045 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.967628 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:38 crc kubenswrapper[4691]: I1124 07:57:38.985259 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.004022 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:39Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.027466 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:39Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.047498 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:39Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.066490 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:39Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.084137 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:39Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.107008 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:39Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.124044 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:39Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.139190 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:39Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.324909 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.325066 4691 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 07:57:41.325045727 +0000 UTC m=+23.323994976 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.325104 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.325206 4691 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.325238 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:41.325231932 +0000 UTC m=+23.324181181 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.425707 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.425780 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.425814 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.425921 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.425918 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.425947 4691 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.425969 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.426053 4691 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.425939 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.426137 4691 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.426058 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:41.426018826 +0000 UTC m=+23.424968115 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.426183 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:41.42615956 +0000 UTC m=+23.425108849 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.426213 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:41.426196611 +0000 UTC m=+23.425145900 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:39 crc kubenswrapper[4691]: I1124 07:57:39.759974 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:39 crc kubenswrapper[4691]: E1124 07:57:39.760150 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:57:40 crc kubenswrapper[4691]: I1124 07:57:40.759983 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:40 crc kubenswrapper[4691]: I1124 07:57:40.760020 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:40 crc kubenswrapper[4691]: E1124 07:57:40.760955 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:40 crc kubenswrapper[4691]: E1124 07:57:40.761088 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:40 crc kubenswrapper[4691]: I1124 07:57:40.911770 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20"} Nov 24 07:57:40 crc kubenswrapper[4691]: I1124 07:57:40.933868 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:40Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:40 crc kubenswrapper[4691]: I1124 07:57:40.949983 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:40Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:40 crc kubenswrapper[4691]: I1124 07:57:40.966460 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:40Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:40 crc kubenswrapper[4691]: I1124 07:57:40.987012 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:40Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.008219 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.026557 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.043470 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.066499 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.243751 4691 csr.go:261] certificate signing request csr-dntn4 is approved, waiting to be issued Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.276213 4691 csr.go:257] certificate signing request csr-dntn4 is issued Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.343589 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.343667 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.343823 4691 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.343901 4691 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 07:57:45.343847694 +0000 UTC m=+27.342796953 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.343951 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:45.343942156 +0000 UTC m=+27.342891405 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.444551 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.444620 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.444671 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.444689 4691 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.444781 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:45.444759741 +0000 UTC m=+27.443708990 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.444852 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.444872 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.444873 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.444918 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.444936 4691 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.444888 4691 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.445000 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:45.444979927 +0000 UTC m=+27.443929176 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.445069 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:45.445046659 +0000 UTC m=+27.443996068 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.698288 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-frdx5"] Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.698859 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-fcwmc"] Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.699016 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-frdx5" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.699218 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.701629 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.701763 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.701811 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.701899 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.701921 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.701973 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.702000 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.702169 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.715638 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.727808 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.743116 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd7
91fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.747000 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/54ccc455-9127-4afd-b3a4-7fc35181bf93-mcd-auth-proxy-config\") pod \"machine-config-daemon-fcwmc\" (UID: \"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.747044 4691 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/54ccc455-9127-4afd-b3a4-7fc35181bf93-proxy-tls\") pod \"machine-config-daemon-fcwmc\" (UID: \"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.747064 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxkfc\" (UniqueName: \"kubernetes.io/projected/54ccc455-9127-4afd-b3a4-7fc35181bf93-kube-api-access-fxkfc\") pod \"machine-config-daemon-fcwmc\" (UID: \"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.747081 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/54ccc455-9127-4afd-b3a4-7fc35181bf93-rootfs\") pod \"machine-config-daemon-fcwmc\" (UID: \"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.747108 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cf4x\" (UniqueName: \"kubernetes.io/projected/b886b151-658b-493c-b186-658ca0533f06-kube-api-access-9cf4x\") pod \"node-resolver-frdx5\" (UID: \"b886b151-658b-493c-b186-658ca0533f06\") " pod="openshift-dns/node-resolver-frdx5" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.747145 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/b886b151-658b-493c-b186-658ca0533f06-hosts-file\") pod \"node-resolver-frdx5\" (UID: \"b886b151-658b-493c-b186-658ca0533f06\") " pod="openshift-dns/node-resolver-frdx5" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.759519 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.759580 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:41 crc kubenswrapper[4691]: E1124 07:57:41.759895 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.775880 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.798115 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.825636 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.848585 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cf4x\" (UniqueName: \"kubernetes.io/projected/b886b151-658b-493c-b186-658ca0533f06-kube-api-access-9cf4x\") pod \"node-resolver-frdx5\" (UID: \"b886b151-658b-493c-b186-658ca0533f06\") " pod="openshift-dns/node-resolver-frdx5" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.848640 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/b886b151-658b-493c-b186-658ca0533f06-hosts-file\") pod \"node-resolver-frdx5\" (UID: \"b886b151-658b-493c-b186-658ca0533f06\") " pod="openshift-dns/node-resolver-frdx5" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.848676 4691 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/54ccc455-9127-4afd-b3a4-7fc35181bf93-mcd-auth-proxy-config\") pod \"machine-config-daemon-fcwmc\" (UID: \"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.848695 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/54ccc455-9127-4afd-b3a4-7fc35181bf93-proxy-tls\") pod \"machine-config-daemon-fcwmc\" (UID: \"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.848717 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/54ccc455-9127-4afd-b3a4-7fc35181bf93-rootfs\") pod \"machine-config-daemon-fcwmc\" (UID: \"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.848736 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxkfc\" (UniqueName: \"kubernetes.io/projected/54ccc455-9127-4afd-b3a4-7fc35181bf93-kube-api-access-fxkfc\") pod \"machine-config-daemon-fcwmc\" (UID: \"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.848868 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/b886b151-658b-493c-b186-658ca0533f06-hosts-file\") pod \"node-resolver-frdx5\" (UID: \"b886b151-658b-493c-b186-658ca0533f06\") " pod="openshift-dns/node-resolver-frdx5" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.848889 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/54ccc455-9127-4afd-b3a4-7fc35181bf93-rootfs\") pod \"machine-config-daemon-fcwmc\" (UID: \"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.849518 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/54ccc455-9127-4afd-b3a4-7fc35181bf93-mcd-auth-proxy-config\") pod \"machine-config-daemon-fcwmc\" (UID: \"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.854305 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.856966 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/54ccc455-9127-4afd-b3a4-7fc35181bf93-proxy-tls\") pod \"machine-config-daemon-fcwmc\" (UID: 
\"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.876215 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cf4x\" (UniqueName: \"kubernetes.io/projected/b886b151-658b-493c-b186-658ca0533f06-kube-api-access-9cf4x\") pod \"node-resolver-frdx5\" (UID: \"b886b151-658b-493c-b186-658ca0533f06\") " pod="openshift-dns/node-resolver-frdx5" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.876224 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxkfc\" (UniqueName: \"kubernetes.io/projected/54ccc455-9127-4afd-b3a4-7fc35181bf93-kube-api-access-fxkfc\") pod \"machine-config-daemon-fcwmc\" (UID: \"54ccc455-9127-4afd-b3a4-7fc35181bf93\") " pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.887515 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.902511 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.917540 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.931708 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.953513 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.964365 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.982648 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd7
91fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:41 crc kubenswrapper[4691]: I1124 07:57:41.996038 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:41Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.010681 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.014787 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-frdx5" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.021624 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 07:57:42 crc kubenswrapper[4691]: W1124 07:57:42.026246 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb886b151_658b_493c_b186_658ca0533f06.slice/crio-59ada43275f8073e32853001f3d4dcc2d2d8c3d08f62fe8d3fa74c1877948bf6 WatchSource:0}: Error finding container 59ada43275f8073e32853001f3d4dcc2d2d8c3d08f62fe8d3fa74c1877948bf6: Status 404 returned error can't find the container with id 59ada43275f8073e32853001f3d4dcc2d2d8c3d08f62fe8d3fa74c1877948bf6 Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.034911 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.061931 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.084040 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.113963 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-zw5l9"] Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.114641 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-gxxrf"] Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.114868 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.115004 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.115336 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-6f24c"] Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.116792 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.117227 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.117593 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.118126 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.118329 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.118474 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.118553 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.119273 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.121657 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.121757 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.121835 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.122021 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.121663 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.122131 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.122375 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.135399 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.150559 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.150602 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-run-k8s-cni-cncf-io\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.151155 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-var-lib-cni-bin\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.151953 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-var-lib-cni-multus\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152025 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-node-log\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152103 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-system-cni-dir\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152129 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-etc-kubernetes\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152191 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-bin\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152240 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152265 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpl5k\" (UniqueName: \"kubernetes.io/projected/27140986-dd30-4f6b-beac-d173dca9a94c-kube-api-access-zpl5k\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152288 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-system-cni-dir\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152306 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-var-lib-openvswitch\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152322 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-netd\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152342 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-var-lib-kubelet\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152362 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-conf-dir\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152418 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhm5j\" (UniqueName: \"kubernetes.io/projected/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-kube-api-access-jhm5j\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152474 4691 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-script-lib\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152503 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-kubelet\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152529 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-log-socket\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152549 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-env-overrides\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152572 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/106a6e78-a004-4232-a0a2-efecf2f7c248-ovn-node-metrics-cert\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152592 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-cnibin\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152626 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-hostroot\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152662 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-ovn-kubernetes\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152705 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-run-netns\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 
07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152725 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-run-multus-certs\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152755 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-openvswitch\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152790 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-cnibin\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152811 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-netns\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152836 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-systemd\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152862 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-systemd-units\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152883 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-etc-openvswitch\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152929 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djg58\" (UniqueName: \"kubernetes.io/projected/106a6e78-a004-4232-a0a2-efecf2f7c248-kube-api-access-djg58\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152949 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/27140986-dd30-4f6b-beac-d173dca9a94c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: 
\"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152972 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-cni-binary-copy\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.152998 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-socket-dir-parent\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.153019 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-daemon-config\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.153037 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-slash\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.153056 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/27140986-dd30-4f6b-beac-d173dca9a94c-cni-binary-copy\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.153082 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-cni-dir\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.153101 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-config\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.153155 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.153180 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-os-release\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.153201 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-ovn\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.153270 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-os-release\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.167327 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.187552 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.200845 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.213077 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.238564 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254628 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-systemd-units\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254679 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-etc-openvswitch\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254703 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djg58\" (UniqueName: \"kubernetes.io/projected/106a6e78-a004-4232-a0a2-efecf2f7c248-kube-api-access-djg58\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254729 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/27140986-dd30-4f6b-beac-d173dca9a94c-cni-sysctl-allowlist\") pod 
\"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254754 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-cni-binary-copy\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254774 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-socket-dir-parent\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254799 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-daemon-config\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254790 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-systemd-units\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254844 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-etc-openvswitch\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254821 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-slash\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254918 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/27140986-dd30-4f6b-beac-d173dca9a94c-cni-binary-copy\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254949 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-cni-dir\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254888 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-slash\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 
07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.254973 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-config\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255005 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255030 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-os-release\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255050 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-ovn\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255089 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-os-release\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255095 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-socket-dir-parent\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255148 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-run-k8s-cni-cncf-io\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255109 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-run-k8s-cni-cncf-io\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255210 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-var-lib-cni-bin\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255229 4691 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-var-lib-cni-multus\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255253 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-node-log\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255294 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-system-cni-dir\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255315 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-etc-kubernetes\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255327 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-var-lib-cni-bin\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255376 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-bin\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255353 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-bin\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255408 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-node-log\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255417 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255444 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpl5k\" (UniqueName: 
\"kubernetes.io/projected/27140986-dd30-4f6b-beac-d173dca9a94c-kube-api-access-zpl5k\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255446 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-var-lib-cni-multus\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255513 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-system-cni-dir\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255536 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-var-lib-openvswitch\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255555 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-netd\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255577 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-var-lib-kubelet\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255596 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-conf-dir\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255615 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhm5j\" (UniqueName: \"kubernetes.io/projected/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-kube-api-access-jhm5j\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255625 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-ovn\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255636 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-script-lib\") pod \"ovnkube-node-6f24c\" (UID: 
\"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255686 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-kubelet\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255706 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-log-socket\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255723 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-env-overrides\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255743 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/106a6e78-a004-4232-a0a2-efecf2f7c248-ovn-node-metrics-cert\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255751 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-daemon-config\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255796 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-cnibin\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255795 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/27140986-dd30-4f6b-beac-d173dca9a94c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255822 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-hostroot\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255830 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-var-lib-kubelet\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc 
kubenswrapper[4691]: I1124 07:57:42.255842 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-ovn-kubernetes\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255337 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-cni-dir\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255865 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-system-cni-dir\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255881 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-ovn-kubernetes\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255900 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255890 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-system-cni-dir\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255914 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-cni-binary-copy\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255951 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-multus-conf-dir\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255958 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-netd\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255951 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-var-lib-openvswitch\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255914 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-etc-kubernetes\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255933 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-run-netns\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255978 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-kubelet\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256001 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-log-socket\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255997 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/27140986-dd30-4f6b-beac-d173dca9a94c-cni-binary-copy\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256020 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-os-release\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256033 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-cnibin\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255786 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-os-release\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.255912 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-run-netns\") pod \"multus-gxxrf\" (UID: 
\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256062 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-config\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256092 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-hostroot\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256129 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-run-multus-certs\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256231 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-openvswitch\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256236 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-host-run-multus-certs\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256246 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-script-lib\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256273 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-openvswitch\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256316 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-cnibin\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256336 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-netns\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256340 4691 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/27140986-dd30-4f6b-beac-d173dca9a94c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256363 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-systemd\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256372 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-cnibin\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256390 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-env-overrides\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256355 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-netns\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.256396 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-systemd\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.264000 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.272092 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/106a6e78-a004-4232-a0a2-efecf2f7c248-ovn-node-metrics-cert\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.289273 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djg58\" (UniqueName: \"kubernetes.io/projected/106a6e78-a004-4232-a0a2-efecf2f7c248-kube-api-access-djg58\") pod \"ovnkube-node-6f24c\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") " pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.290943 4691 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-11-24 07:52:41 +0000 UTC, rotation deadline is 2026-09-25 12:04:21.789468437 +0000 UTC Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.291006 4691 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7324h6m39.498465084s for next certificate rotation Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.302033 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhm5j\" (UniqueName: 
\"kubernetes.io/projected/b2332a73-f85c-470c-9209-c5e5cd1bc3a1-kube-api-access-jhm5j\") pod \"multus-gxxrf\" (UID: \"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\") " pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.304832 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpl5k\" (UniqueName: \"kubernetes.io/projected/27140986-dd30-4f6b-beac-d173dca9a94c-kube-api-access-zpl5k\") pod \"multus-additional-cni-plugins-zw5l9\" (UID: \"27140986-dd30-4f6b-beac-d173dca9a94c\") " pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.313116 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.344343 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.385854 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.408066 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: 
I1124 07:57:42.424336 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.433114 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.439168 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.441611 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-gxxrf" Nov 24 07:57:42 crc kubenswrapper[4691]: W1124 07:57:42.446465 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27140986_dd30_4f6b_beac_d173dca9a94c.slice/crio-68a1b12cab13339500b5ef39a5da507b4f685a7c248ffa3cfd50e010e65a9c48 WatchSource:0}: Error finding container 68a1b12cab13339500b5ef39a5da507b4f685a7c248ffa3cfd50e010e65a9c48: Status 404 returned error can't find the container with id 68a1b12cab13339500b5ef39a5da507b4f685a7c248ffa3cfd50e010e65a9c48 Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.450058 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.454211 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: W1124 07:57:42.455075 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb2332a73_f85c_470c_9209_c5e5cd1bc3a1.slice/crio-95d881e960e09cf1338cef91519f14efff860c86e3a7b92053cd146e92647d0b WatchSource:0}: Error finding container 95d881e960e09cf1338cef91519f14efff860c86e3a7b92053cd146e92647d0b: Status 404 returned error can't find the container with id 95d881e960e09cf1338cef91519f14efff860c86e3a7b92053cd146e92647d0b Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.471293 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: W1124 07:57:42.474412 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod106a6e78_a004_4232_a0a2_efecf2f7c248.slice/crio-6ebc38bdcbb47f8898ca2377a606c51091bc64c3f9eaeecad1b38e1072edde49 WatchSource:0}: Error finding container 6ebc38bdcbb47f8898ca2377a606c51091bc64c3f9eaeecad1b38e1072edde49: Status 404 returned error can't find the container with id 
6ebc38bdcbb47f8898ca2377a606c51091bc64c3f9eaeecad1b38e1072edde49 Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.482617 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.495908 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.500440 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.511154 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.513104 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.517056 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.539216 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.558736 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.574995 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.598478 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.615148 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.634642 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.651158 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.666169 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.687691 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}
]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.701369 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.714509 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.726621 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.740910 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.753030 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.759994 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.760114 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:42 crc kubenswrapper[4691]: E1124 07:57:42.760156 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:42 crc kubenswrapper[4691]: E1124 07:57:42.760289 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.769936 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.791557 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.812284 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T
07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\
",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.830410 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.846589 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.919363 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb" exitCode=0 Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.919470 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" 
event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb"} Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.919530 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"6ebc38bdcbb47f8898ca2377a606c51091bc64c3f9eaeecad1b38e1072edde49"} Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.921434 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gxxrf" event={"ID":"b2332a73-f85c-470c-9209-c5e5cd1bc3a1","Type":"ContainerStarted","Data":"ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703"} Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.921525 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gxxrf" event={"ID":"b2332a73-f85c-470c-9209-c5e5cd1bc3a1","Type":"ContainerStarted","Data":"95d881e960e09cf1338cef91519f14efff860c86e3a7b92053cd146e92647d0b"} Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.923645 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577"} Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.923716 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb"} Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.923739 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"087174c71d5a3abe0dbad42ab09873f836fac2beb74d25ea8cca85158eb9110a"} Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.925674 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-frdx5" event={"ID":"b886b151-658b-493c-b186-658ca0533f06","Type":"ContainerStarted","Data":"3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2"} Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.925756 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-frdx5" event={"ID":"b886b151-658b-493c-b186-658ca0533f06","Type":"ContainerStarted","Data":"59ada43275f8073e32853001f3d4dcc2d2d8c3d08f62fe8d3fa74c1877948bf6"} Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.926779 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" event={"ID":"27140986-dd30-4f6b-beac-d173dca9a94c","Type":"ContainerStarted","Data":"85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625"} Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.926817 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" event={"ID":"27140986-dd30-4f6b-beac-d173dca9a94c","Type":"ContainerStarted","Data":"68a1b12cab13339500b5ef39a5da507b4f685a7c248ffa3cfd50e010e65a9c48"} Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.937470 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.955650 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:42 crc kubenswrapper[4691]: I1124 07:57:42.981087 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z 
is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.001784 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:42Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.020548 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.035547 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.048399 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.062923 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.079342 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.091253 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}
]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.105504 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.119077 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.132769 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.152293 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.198886 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.232960 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.273623 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.311071 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.360173 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.391089 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.431425 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.471712 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.512655 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.551791 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.598394 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.636721 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.684232 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.709110 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.759724 4691 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:43 crc kubenswrapper[4691]: E1124 07:57:43.760400 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.933354 4691 generic.go:334] "Generic (PLEG): container finished" podID="27140986-dd30-4f6b-beac-d173dca9a94c" containerID="85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625" exitCode=0 Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.933424 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" event={"ID":"27140986-dd30-4f6b-beac-d173dca9a94c","Type":"ContainerDied","Data":"85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625"} Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.939392 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539"} Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.939486 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669"} Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.939500 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf"} Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.939510 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1"} Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.939519 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693"} Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.951826 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.970001 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:43 crc kubenswrapper[4691]: I1124 07:57:43.984002 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.008087 4691 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.025007 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.040538 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.052542 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.073091 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.092839 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.100213 4691 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.102569 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.102603 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.102613 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.102760 4691 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.120865 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.162236 4691 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.162615 4691 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.164112 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.164179 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.164195 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.164218 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.164231 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: E1124 07:57:44.181733 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 
2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.185991 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.186029 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.186039 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.186058 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.186072 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.191645 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-c
ontroller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: E1124 07:57:44.201017 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet 
has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406ee
c4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\
\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.206297 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.206340 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.206353 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.206370 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.206383 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: E1124 07:57:44.221242 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 
2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.225170 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.225216 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.225236 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.225264 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.225281 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.232922 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: E1124 07:57:44.240331 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 
2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.245585 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.245648 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.245663 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.245688 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.245716 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: E1124 07:57:44.260916 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 
2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: E1124 07:57:44.261061 4691 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.263388 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.263461 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.263473 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.263490 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.263505 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.277114 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mount
Path\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.313001 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc 
kubenswrapper[4691]: I1124 07:57:44.366306 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.366388 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.366404 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.366426 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.366442 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.416391 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-7pdtc"] Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.417267 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-7pdtc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.420226 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.420420 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.420516 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.422536 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.436902 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.469007 4691 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.469061 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.469074 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.469097 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.469113 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.473218 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.478596 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qn6m\" (UniqueName: \"kubernetes.io/projected/c031c6f0-57e9-4339-ac63-323d1effb276-kube-api-access-8qn6m\") pod \"node-ca-7pdtc\" (UID: \"c031c6f0-57e9-4339-ac63-323d1effb276\") " pod="openshift-image-registry/node-ca-7pdtc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.478651 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c031c6f0-57e9-4339-ac63-323d1effb276-serviceca\") pod \"node-ca-7pdtc\" (UID: \"c031c6f0-57e9-4339-ac63-323d1effb276\") " pod="openshift-image-registry/node-ca-7pdtc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.478691 
4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c031c6f0-57e9-4339-ac63-323d1effb276-host\") pod \"node-ca-7pdtc\" (UID: \"c031c6f0-57e9-4339-ac63-323d1effb276\") " pod="openshift-image-registry/node-ca-7pdtc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.512095 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.567765 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.571682 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.571721 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.571731 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.571752 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.571766 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.580522 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qn6m\" (UniqueName: \"kubernetes.io/projected/c031c6f0-57e9-4339-ac63-323d1effb276-kube-api-access-8qn6m\") pod \"node-ca-7pdtc\" (UID: \"c031c6f0-57e9-4339-ac63-323d1effb276\") " pod="openshift-image-registry/node-ca-7pdtc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.580576 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c031c6f0-57e9-4339-ac63-323d1effb276-serviceca\") pod \"node-ca-7pdtc\" (UID: \"c031c6f0-57e9-4339-ac63-323d1effb276\") " pod="openshift-image-registry/node-ca-7pdtc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.580609 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c031c6f0-57e9-4339-ac63-323d1effb276-host\") pod \"node-ca-7pdtc\" (UID: \"c031c6f0-57e9-4339-ac63-323d1effb276\") " pod="openshift-image-registry/node-ca-7pdtc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.580727 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c031c6f0-57e9-4339-ac63-323d1effb276-host\") pod \"node-ca-7pdtc\" (UID: \"c031c6f0-57e9-4339-ac63-323d1effb276\") " pod="openshift-image-registry/node-ca-7pdtc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.582238 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c031c6f0-57e9-4339-ac63-323d1effb276-serviceca\") pod \"node-ca-7pdtc\" (UID: \"c031c6f0-57e9-4339-ac63-323d1effb276\") " pod="openshift-image-registry/node-ca-7pdtc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.590284 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.619677 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qn6m\" (UniqueName: \"kubernetes.io/projected/c031c6f0-57e9-4339-ac63-323d1effb276-kube-api-access-8qn6m\") pod \"node-ca-7pdtc\" (UID: \"c031c6f0-57e9-4339-ac63-323d1effb276\") " pod="openshift-image-registry/node-ca-7pdtc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.661089 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.674527 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.674572 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.674588 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.674612 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.674631 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.696347 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.734016 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.737123 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-7pdtc" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.760284 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.760382 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:44 crc kubenswrapper[4691]: E1124 07:57:44.760496 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:44 crc kubenswrapper[4691]: E1124 07:57:44.760667 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.779487 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.779569 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.779581 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.779599 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.779611 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.780002 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTim
e\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.815695 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.852719 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.881567 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.881615 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.881628 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.881650 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.881663 4691 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.890053 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.932940 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.947035 4691 generic.go:334] "Generic (PLEG): container finished" podID="27140986-dd30-4f6b-beac-d173dca9a94c" containerID="f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69" exitCode=0 Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.947143 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" event={"ID":"27140986-dd30-4f6b-beac-d173dca9a94c","Type":"ContainerDied","Data":"f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.953903 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.958708 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-7pdtc" event={"ID":"c031c6f0-57e9-4339-ac63-323d1effb276","Type":"ContainerStarted","Data":"4222f9a187bbb028bd46610f145d6675ee704138957144c7df82ed6f5aa60dd2"} Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.971613 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:44Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.985130 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.985209 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.985220 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.985241 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:44 crc kubenswrapper[4691]: I1124 07:57:44.985256 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:44Z","lastTransitionTime":"2025-11-24T07:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.014795 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc 
kubenswrapper[4691]: I1124 07:57:45.049898 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.087599 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.087647 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.087659 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.087683 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.087697 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:45Z","lastTransitionTime":"2025-11-24T07:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.088989 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.132909 4691 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.173393 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.190582 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.190774 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.190894 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.191006 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.191131 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:45Z","lastTransitionTime":"2025-11-24T07:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.211559 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.257685 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf
5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.294998 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.295061 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.295087 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.295113 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.295129 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:45Z","lastTransitionTime":"2025-11-24T07:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.297530 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.329943 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.370083 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.389613 4691 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.389811 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.389939 4691 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.389995 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:53.389978664 +0000 UTC m=+35.388927913 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.390396 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 07:57:53.390343444 +0000 UTC m=+35.389292693 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.397857 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.398062 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.398155 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.398241 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.398320 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:45Z","lastTransitionTime":"2025-11-24T07:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.411359 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.452309 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.488101 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.490436 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.490503 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.490538 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: 
\"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.490652 4691 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.490725 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:53.490704816 +0000 UTC m=+35.489654065 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.490807 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.490869 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.490888 4691 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.490832 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.491030 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.491113 4691 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.490973 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:53.490944153 +0000 UTC m=+35.489893562 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.491260 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 07:57:53.491248181 +0000 UTC m=+35.490197420 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.501814 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.501861 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.501874 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.501896 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.501911 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:45Z","lastTransitionTime":"2025-11-24T07:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.535635 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e2231440
15dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.569772 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.604751 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.604820 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.604834 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.604865 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.604884 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:45Z","lastTransitionTime":"2025-11-24T07:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.618394 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.707303 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.707367 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.707380 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.707401 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.707414 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:45Z","lastTransitionTime":"2025-11-24T07:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.760012 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:45 crc kubenswrapper[4691]: E1124 07:57:45.760222 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.810068 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.810120 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.810133 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.810155 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.810169 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:45Z","lastTransitionTime":"2025-11-24T07:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.912799 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.912847 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.912858 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.912877 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.912890 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:45Z","lastTransitionTime":"2025-11-24T07:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.964763 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-7pdtc" event={"ID":"c031c6f0-57e9-4339-ac63-323d1effb276","Type":"ContainerStarted","Data":"39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.967727 4691 generic.go:334] "Generic (PLEG): container finished" podID="27140986-dd30-4f6b-beac-d173dca9a94c" containerID="8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b" exitCode=0 Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.967783 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" event={"ID":"27140986-dd30-4f6b-beac-d173dca9a94c","Type":"ContainerDied","Data":"8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b"} Nov 24 07:57:45 crc kubenswrapper[4691]: I1124 07:57:45.989638 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:45Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.005595 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.015494 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.015568 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.015588 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.015617 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.015637 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:46Z","lastTransitionTime":"2025-11-24T07:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.029363 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\
\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.047745 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.064049 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.079343 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.093665 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.112565 4691 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.118075 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.118124 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.118137 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.118161 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.118178 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:46Z","lastTransitionTime":"2025-11-24T07:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.129897 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.141692 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.164187 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.178149 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.199672 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.216276 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.221838 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.221873 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.221886 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.221907 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.221920 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:46Z","lastTransitionTime":"2025-11-24T07:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.231540 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.252838 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.291315 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.379854 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.379904 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.379918 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.379947 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.379963 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:46Z","lastTransitionTime":"2025-11-24T07:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.390885 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.410292 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.424920 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.453897 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.482694 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.482760 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.482774 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.482790 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.482802 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:46Z","lastTransitionTime":"2025-11-24T07:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.490892 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.541601 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state
\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.574417 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.585790 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.585843 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.585857 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.585879 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.585894 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:46Z","lastTransitionTime":"2025-11-24T07:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.611764 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.654549 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.689753 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.689828 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.689848 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.689874 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.689896 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:46Z","lastTransitionTime":"2025-11-24T07:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.709558 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.733207 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"
name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.760208 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:46 crc kubenswrapper[4691]: E1124 07:57:46.760369 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.760820 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:46 crc kubenswrapper[4691]: E1124 07:57:46.760915 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.773483 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.792593 4691 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.792652 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.792663 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.792685 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.792702 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:46Z","lastTransitionTime":"2025-11-24T07:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.819376 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.895736 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.895819 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.895838 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.895868 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.895889 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:46Z","lastTransitionTime":"2025-11-24T07:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.974177 4691 generic.go:334] "Generic (PLEG): container finished" podID="27140986-dd30-4f6b-beac-d173dca9a94c" containerID="c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301" exitCode=0 Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.974274 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" event={"ID":"27140986-dd30-4f6b-beac-d173dca9a94c","Type":"ContainerDied","Data":"c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301"} Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.982706 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907"} Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.991530 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.999613 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.999685 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.999704 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.999732 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:46 crc kubenswrapper[4691]: I1124 07:57:46.999750 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:46Z","lastTransitionTime":"2025-11-24T07:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.019387 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49
117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.037221 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.055142 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.066060 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.083547 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.095363 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.104132 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.104175 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.104186 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.104203 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.104213 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:47Z","lastTransitionTime":"2025-11-24T07:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.130265 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.177668 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.207675 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.207724 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.207735 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.207753 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.207771 4691 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:47Z","lastTransitionTime":"2025-11-24T07:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.209178 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.252681 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.294243 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.311016 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.311064 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.311077 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.311097 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.311109 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:47Z","lastTransitionTime":"2025-11-24T07:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.333829 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.375206 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.414836 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.414899 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.414912 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.414934 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.414947 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:47Z","lastTransitionTime":"2025-11-24T07:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.415067 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:47Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.517686 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.517728 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.517737 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.517753 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.517764 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:47Z","lastTransitionTime":"2025-11-24T07:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.620555 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.620604 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.620613 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.620633 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.620646 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:47Z","lastTransitionTime":"2025-11-24T07:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.725950 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.726009 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.726024 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.726044 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.726059 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:47Z","lastTransitionTime":"2025-11-24T07:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.759637 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:47 crc kubenswrapper[4691]: E1124 07:57:47.759922 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.829518 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.829574 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.829595 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.829664 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.829686 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:47Z","lastTransitionTime":"2025-11-24T07:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.933754 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.933819 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.933839 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.933868 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.933889 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:47Z","lastTransitionTime":"2025-11-24T07:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.993523 4691 generic.go:334] "Generic (PLEG): container finished" podID="27140986-dd30-4f6b-beac-d173dca9a94c" containerID="b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb" exitCode=0 Nov 24 07:57:47 crc kubenswrapper[4691]: I1124 07:57:47.993595 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" event={"ID":"27140986-dd30-4f6b-beac-d173dca9a94c","Type":"ContainerDied","Data":"b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb"} Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.013329 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.036411 4691 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.036441 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.036476 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.036490 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.036377 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for 
client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.036498 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:48Z","lastTransitionTime":"2025-11-24T07:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.055393 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.078254 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.097487 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.128959 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.139110 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.139160 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.139173 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.139194 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.139213 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:48Z","lastTransitionTime":"2025-11-24T07:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.146142 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.159732 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.172109 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.187886 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.202119 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.219117 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.230938 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.241728 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.241770 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.241778 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.241792 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.241801 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:48Z","lastTransitionTime":"2025-11-24T07:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.245048 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.266044 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.345678 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.345733 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.345743 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.345764 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.345778 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:48Z","lastTransitionTime":"2025-11-24T07:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.448874 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.448952 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.448965 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.448988 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.449000 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:48Z","lastTransitionTime":"2025-11-24T07:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.515738 4691 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.552484 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.552533 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.552543 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.552560 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.552571 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:48Z","lastTransitionTime":"2025-11-24T07:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.655151 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.655207 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.655223 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.655246 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.655261 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:48Z","lastTransitionTime":"2025-11-24T07:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.757643 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.757691 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.757704 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.757725 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.757739 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:48Z","lastTransitionTime":"2025-11-24T07:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.760127 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:48 crc kubenswrapper[4691]: E1124 07:57:48.760239 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.760430 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:48 crc kubenswrapper[4691]: E1124 07:57:48.760534 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.776672 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.793006 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.806287 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.819193 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.833482 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.856906 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/
host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.859580 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:48 
crc kubenswrapper[4691]: I1124 07:57:48.859643 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.859657 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.859682 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.859697 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:48Z","lastTransitionTime":"2025-11-24T07:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.877740 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.894127 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.909025 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.922523 4691 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.951663 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.962769 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.962806 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.962817 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.962834 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.962845 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:48Z","lastTransitionTime":"2025-11-24T07:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.965676 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.978611 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:48 crc kubenswrapper[4691]: I1124 07:57:48.989774 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.005370 4691 generic.go:334] "Generic (PLEG): container finished" podID="27140986-dd30-4f6b-beac-d173dca9a94c" containerID="981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5" exitCode=0 Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.005489 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" event={"ID":"27140986-dd30-4f6b-beac-d173dca9a94c","Type":"ContainerDied","Data":"981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5"} Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.013036 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2"} Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.013494 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 
07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.017514 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reas
on\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.038120 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.049585 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.057962 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.065852 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.065885 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.065896 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.065915 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.065929 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:49Z","lastTransitionTime":"2025-11-24T07:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.074760 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.088760 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.103802 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.116654 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.135142 4691 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.147922 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.169305 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.169357 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.169371 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.169391 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.169406 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:49Z","lastTransitionTime":"2025-11-24T07:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.171503 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d
2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\
"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.183944 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.206746 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.221012 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.234885 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.248735 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.261285 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.272354 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.272391 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.272403 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.272418 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.272429 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:49Z","lastTransitionTime":"2025-11-24T07:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.273577 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.291414 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.332399 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.373499 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.374516 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.374558 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.374566 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.374579 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.374589 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:49Z","lastTransitionTime":"2025-11-24T07:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.411718 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.456907 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.477352 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.477414 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.477430 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.477476 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.477494 4691 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:49Z","lastTransitionTime":"2025-11-24T07:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.490814 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.552512 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7efa7bbcd921fadd3fa72c88ada484cac792b21
c57f32f23f5304e8960068b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.570279 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.581283 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.581363 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.581375 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.581396 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.581413 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:49Z","lastTransitionTime":"2025-11-24T07:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.616690 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.650764 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.684231 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.684268 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.684277 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.684294 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.684304 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:49Z","lastTransitionTime":"2025-11-24T07:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.691544 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.729306 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.759533 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:49 crc kubenswrapper[4691]: E1124 07:57:49.759659 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.768558 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.786950 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.787205 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.787308 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.787387 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.787484 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:49Z","lastTransitionTime":"2025-11-24T07:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.809214 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.889782 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.890139 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.890217 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.890301 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.890377 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:49Z","lastTransitionTime":"2025-11-24T07:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.993276 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.993314 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.993325 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.993341 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:49 crc kubenswrapper[4691]: I1124 07:57:49.993352 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:49Z","lastTransitionTime":"2025-11-24T07:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.020001 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" event={"ID":"27140986-dd30-4f6b-beac-d173dca9a94c","Type":"ContainerStarted","Data":"730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff"} Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.020162 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.020712 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.038798 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\
\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\
"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\
\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.072785 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift
-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.077222 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.089293 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.095630 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.095683 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.095705 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.095723 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.095736 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:50Z","lastTransitionTime":"2025-11-24T07:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.102664 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.115475 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.127132 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.140798 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.151597 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.175842 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-sock
et\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mou
ntPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.198325 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.198367 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.198383 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.198399 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.198412 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:50Z","lastTransitionTime":"2025-11-24T07:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.209611 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.263248 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.291694 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.300626 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.300678 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.300694 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.300712 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.300725 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:50Z","lastTransitionTime":"2025-11-24T07:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.330228 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.370296 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 
2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.403013 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.403057 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.403067 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.403081 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.403113 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:50Z","lastTransitionTime":"2025-11-24T07:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.410513 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.458213 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.495745 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.505351 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.505383 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.505391 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.505407 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.505417 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:50Z","lastTransitionTime":"2025-11-24T07:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.543825 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.571734 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.612738 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.612795 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.612809 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.612830 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.612845 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:50Z","lastTransitionTime":"2025-11-24T07:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.619751 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.649836 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.689248 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.714664 4691 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.714721 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.714743 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.714764 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.714782 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:50Z","lastTransitionTime":"2025-11-24T07:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.729340 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.760596 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.760445 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:50 crc kubenswrapper[4691]: E1124 07:57:50.760857 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:50 crc kubenswrapper[4691]: E1124 07:57:50.761120 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.781131 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.812973 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.816684 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.816741 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.816758 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.816779 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.816790 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:50Z","lastTransitionTime":"2025-11-24T07:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.853560 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.888020 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.919733 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.919807 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.919820 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.919858 4691 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.919872 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:50Z","lastTransitionTime":"2025-11-24T07:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.941855 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7efa7bbcd921fadd3fa72c88ada484cac792b21
c57f32f23f5304e8960068b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:50 crc kubenswrapper[4691]: I1124 07:57:50.971040 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:50Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.013794 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:51Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.023144 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.023217 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.023232 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.023251 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.023264 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:51Z","lastTransitionTime":"2025-11-24T07:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.024926 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.126371 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.126828 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.126840 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.126861 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.126872 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:51Z","lastTransitionTime":"2025-11-24T07:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.230705 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.230813 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.230852 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.230897 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.230924 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:51Z","lastTransitionTime":"2025-11-24T07:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.334033 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.334136 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.334166 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.334203 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.334232 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:51Z","lastTransitionTime":"2025-11-24T07:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.438781 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.438869 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.438890 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.438920 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.438940 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:51Z","lastTransitionTime":"2025-11-24T07:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.543814 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.543882 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.543902 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.543930 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.543973 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:51Z","lastTransitionTime":"2025-11-24T07:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.647996 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.648049 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.648063 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.648089 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.648105 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:51Z","lastTransitionTime":"2025-11-24T07:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.750823 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.751072 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.751085 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.751125 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.751141 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:51Z","lastTransitionTime":"2025-11-24T07:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.760609 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 07:57:51 crc kubenswrapper[4691]: E1124 07:57:51.760896 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.854895 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.854945 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.854958 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.855006 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.855020 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:51Z","lastTransitionTime":"2025-11-24T07:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.958398 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.958478 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.958494 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.958519 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:57:51 crc kubenswrapper[4691]: I1124 07:57:51.958533 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:51Z","lastTransitionTime":"2025-11-24T07:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.029932 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/0.log" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.034267 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2" exitCode=1 Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.034342 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2"} Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.034972 4691 scope.go:117] "RemoveContainer" containerID="b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.052248 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.060897 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:52 crc 
kubenswrapper[4691]: I1124 07:57:52.060954 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.060965 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.060985 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.061018 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:52Z","lastTransitionTime":"2025-11-24T07:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.067910 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.079629 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.104893 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:51Z\\\",\\\"message\\\":\\\"o:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 07:57:51.241034 5973 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 07:57:51.241096 5973 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 07:57:51.241278 5973 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:51.241302 5973 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:51.241316 5973 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:51.241328 5973 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 07:57:51.241341 5973 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 07:57:51.241538 5973 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:51.242286 5973 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:51.242323 5973 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:51.242359 5973 factory.go:656] Stopping watch factory\\\\nI1124 07:57:51.242382 5973 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:57:51.242404 5973 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 
07:57:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.119008 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.141122 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf
8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"
image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.156118 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.163394 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.163468 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.163484 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.163510 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.163540 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:52Z","lastTransitionTime":"2025-11-24T07:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.179540 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.197072 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.207830 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.214691 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.229150 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.262860 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.273727 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.273783 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.273797 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.273820 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.273834 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:52Z","lastTransitionTime":"2025-11-24T07:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.304830 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.323269 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.338399 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.353465 4691 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.366241 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.376538 4691 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.376581 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.376592 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.376614 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.376626 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:52Z","lastTransitionTime":"2025-11-24T07:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.383050 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.396156 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.413923 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:51Z\\\",\\\"message\\\":\\\"o:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 07:57:51.241034 5973 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 07:57:51.241096 5973 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 07:57:51.241278 5973 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:51.241302 5973 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:51.241316 5973 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:51.241328 5973 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 07:57:51.241341 5973 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 07:57:51.241538 5973 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:51.242286 5973 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:51.242323 5973 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:51.242359 5973 factory.go:656] Stopping watch factory\\\\nI1124 07:57:51.242382 5973 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:57:51.242404 5973 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 
07:57:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.428861 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.458424 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf
8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"
image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.480032 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.480078 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.480093 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.480119 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.480132 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:52Z","lastTransitionTime":"2025-11-24T07:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.481735 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.500675 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.516769 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.535618 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.554410 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.574017 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.582232 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.582292 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.582306 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.582329 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.582346 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:52Z","lastTransitionTime":"2025-11-24T07:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.597818 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.615976 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:52Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.685364 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.685430 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.685461 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.685486 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.685500 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:52Z","lastTransitionTime":"2025-11-24T07:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.759634 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.759745 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:52 crc kubenswrapper[4691]: E1124 07:57:52.759772 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:52 crc kubenswrapper[4691]: E1124 07:57:52.760024 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.788894 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.788964 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.788978 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.789003 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.789018 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:52Z","lastTransitionTime":"2025-11-24T07:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.892886 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.892963 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.892983 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.893020 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.893042 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:52Z","lastTransitionTime":"2025-11-24T07:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.996509 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.996583 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.996603 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.996633 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:52 crc kubenswrapper[4691]: I1124 07:57:52.996654 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:52Z","lastTransitionTime":"2025-11-24T07:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.041757 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/0.log" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.045804 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea"} Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.045989 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.063872 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.083910 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.099554 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.100403 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.100471 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.100491 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.100517 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.100530 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:53Z","lastTransitionTime":"2025-11-24T07:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.118492 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.139313 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.155833 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.177682 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.193309 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.202877 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.202914 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.202923 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.202940 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.202951 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:53Z","lastTransitionTime":"2025-11-24T07:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.207709 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.225074 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.237939 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.250555 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.269321 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:51Z\\\",\\\"message\\\":\\\"o:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 07:57:51.241034 5973 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 07:57:51.241096 5973 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 07:57:51.241278 5973 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:51.241302 5973 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:51.241316 5973 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:51.241328 5973 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 07:57:51.241341 5973 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 07:57:51.241538 5973 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:51.242286 5973 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:51.242323 5973 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:51.242359 5973 factory.go:656] Stopping watch factory\\\\nI1124 07:57:51.242382 5973 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:57:51.242404 5973 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 
07:57:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.280679 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.304192 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:53Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.306603 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.306655 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.306667 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.306688 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.306701 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:53Z","lastTransitionTime":"2025-11-24T07:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.410572 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.410647 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.410660 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.410681 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.410698 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:53Z","lastTransitionTime":"2025-11-24T07:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.485249 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.485653 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 07:58:09.485598988 +0000 UTC m=+51.484548277 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.485952 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.486116 4691 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.486200 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-24 07:58:09.486169174 +0000 UTC m=+51.485118413 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.513353 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.513428 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.513477 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.513507 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.513525 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:53Z","lastTransitionTime":"2025-11-24T07:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.587225 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.587352 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.587444 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.587596 4691 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.587682 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.587716 4691 projected.go:288] Couldn't get configMap 
openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.587743 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.587762 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.587767 4691 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.587790 4691 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.587706 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:58:09.587678648 +0000 UTC m=+51.586627927 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.587873 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 07:58:09.587854513 +0000 UTC m=+51.586803792 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.587900 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 07:58:09.587885484 +0000 UTC m=+51.586834763 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.616578 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.616634 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.616646 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.616669 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.616686 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:53Z","lastTransitionTime":"2025-11-24T07:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.720021 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.720170 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.720207 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.720236 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.720263 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:53Z","lastTransitionTime":"2025-11-24T07:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.759923 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:53 crc kubenswrapper[4691]: E1124 07:57:53.760117 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.823610 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.823691 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.823706 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.823727 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.823741 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:53Z","lastTransitionTime":"2025-11-24T07:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.926687 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.926786 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.926808 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.926847 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:53 crc kubenswrapper[4691]: I1124 07:57:53.926869 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:53Z","lastTransitionTime":"2025-11-24T07:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.030586 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.030646 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.030665 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.030697 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.030717 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.054037 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/1.log" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.054989 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/0.log" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.059796 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea" exitCode=1 Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.059855 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.059927 4691 scope.go:117] "RemoveContainer" containerID="b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.062196 4691 scope.go:117] "RemoveContainer" containerID="ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea" Nov 24 07:57:54 crc kubenswrapper[4691]: E1124 07:57:54.062969 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.081287 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.102839 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.124386 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.134696 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.134749 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.134771 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.134800 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.134824 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.151997 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.176557 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.199471 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.219933 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name
\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.237322 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.237369 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.237381 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.237405 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.237418 4691 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.238642 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.260090 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.281868 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount
\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"co
ntainerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.301011 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.318175 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.330882 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.336370 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.336427 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.336469 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.336506 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.336526 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: E1124 07:57:54.352729 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.355637 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:51Z\\\",\\\"message\\\":\\\"o:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 07:57:51.241034 5973 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 07:57:51.241096 5973 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 07:57:51.241278 5973 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:51.241302 5973 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:51.241316 5973 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:51.241328 5973 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 07:57:51.241341 5973 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 07:57:51.241538 5973 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:51.242286 5973 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:51.242323 5973 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:51.242359 5973 factory.go:656] Stopping watch factory\\\\nI1124 07:57:51.242382 5973 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:57:51.242404 5973 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:53Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 
07:57:53.045562 6118 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:53.045585 6118 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:53.045608 6118 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 07:57:53.045613 6118 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 07:57:53.045643 6118 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:53.045653 6118 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 07:57:53.045650 6118 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 07:57:53.045664 6118 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:53.045668 6118 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 07:57:53.045673 6118 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 07:57:53.045682 6118 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:53.045707 6118 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:53.045707 6118 factory.go:656] Stopping watch factory\\\\nI1124 07:57:53.045730 6118 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"im
ageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.357661 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.357712 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.357723 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.357741 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.357752 4691 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.367195 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: E1124 07:57:54.369502 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.372845 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.372897 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.372910 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.372932 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.372947 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: E1124 07:57:54.386771 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.390716 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.390771 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.390784 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.390807 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.390823 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: E1124 07:57:54.405145 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.409329 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.409378 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.409395 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.409420 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.409437 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: E1124 07:57:54.421778 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: E1124 07:57:54.421937 4691 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.424007 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.424038 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.424051 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.424073 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.424087 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.526245 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.526290 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.526316 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.526337 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.526349 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.630102 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.630161 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.630173 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.630201 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.630216 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.727695 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp"] Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.728725 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.731949 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.733220 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.733263 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.733278 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.733302 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.733317 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.733429 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.757680 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.759799 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.759819 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:54 crc kubenswrapper[4691]: E1124 07:57:54.759974 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:54 crc kubenswrapper[4691]: E1124 07:57:54.760204 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.781292 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.798099 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.802563 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.802627 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-env-overrides\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.802655 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.802695 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zkxj\" (UniqueName: \"kubernetes.io/projected/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-kube-api-access-5zkxj\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.814681 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.837143 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.837217 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.837232 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.837265 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.837294 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.842260 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.856216 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.870540 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.884808 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.903800 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.903916 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-env-overrides\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.903957 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.904035 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zkxj\" (UniqueName: \"kubernetes.io/projected/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-kube-api-access-5zkxj\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.905417 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-env-overrides\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.905420 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.908891 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634
e4ab31a247d12af6c72335ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b7efa7bbcd921fadd3fa72c88ada484cac792b21c57f32f23f5304e8960068b2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:51Z\\\",\\\"message\\\":\\\"o:190] Sending *v1.Pod event handler 3 for removal\\\\nI1124 07:57:51.241034 5973 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1124 07:57:51.241096 5973 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1124 07:57:51.241278 5973 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:51.241302 5973 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:51.241316 5973 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:51.241328 5973 handler.go:208] Removed *v1.Node event handler 7\\\\nI1124 07:57:51.241341 5973 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1124 07:57:51.241538 5973 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:51.242286 5973 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:51.242323 5973 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:51.242359 5973 factory.go:656] Stopping watch factory\\\\nI1124 07:57:51.242382 5973 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:57:51.242404 5973 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:5\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:53Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:53.045562 6118 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:53.045585 6118 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:53.045608 6118 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 07:57:53.045613 6118 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 07:57:53.045643 6118 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:53.045653 6118 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 07:57:53.045650 6118 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 07:57:53.045664 6118 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:53.045668 6118 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 07:57:53.045673 6118 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 07:57:53.045682 6118 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:53.045707 6118 handler.go:208] Removed 
*v1.Node event handler 2\\\\nI1124 07:57:53.045707 6118 factory.go:656] Stopping watch factory\\\\nI1124 07:57:53.045730 6118 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0
0289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.915849 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.925791 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zkxj\" (UniqueName: \"kubernetes.io/projected/285fed6b-5793-4f67-8f5a-8bb6bebccfa2-kube-api-access-5zkxj\") pod \"ovnkube-control-plane-749d76644c-4bjjp\" (UID: \"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.927088 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.941027 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.941102 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.941111 4691 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.941129 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.941139 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:54Z","lastTransitionTime":"2025-11-24T07:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.942024 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.955999 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.973682 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:54 crc kubenswrapper[4691]: I1124 07:57:54.987090 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.000160 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:54Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.021859 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd
302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"
name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"sta
te\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.043790 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.043846 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.043861 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.043883 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.043899 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:55Z","lastTransitionTime":"2025-11-24T07:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.046245 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" Nov 24 07:57:55 crc kubenswrapper[4691]: W1124 07:57:55.060177 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod285fed6b_5793_4f67_8f5a_8bb6bebccfa2.slice/crio-b723b7b6efcaca7c2a4baa3943f45ce5a2e5ead638ca59c8eeb7ed763d7d4d98 WatchSource:0}: Error finding container b723b7b6efcaca7c2a4baa3943f45ce5a2e5ead638ca59c8eeb7ed763d7d4d98: Status 404 returned error can't find the container with id b723b7b6efcaca7c2a4baa3943f45ce5a2e5ead638ca59c8eeb7ed763d7d4d98 Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.065875 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/1.log" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.070809 4691 scope.go:117] "RemoveContainer" containerID="ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea" Nov 24 07:57:55 crc kubenswrapper[4691]: E1124 07:57:55.071013 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.086172 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a18
8abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure 
cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.098645 4691 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.110153 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.131595 4691 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.146405 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.146434 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.146447 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.146482 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.146495 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:55Z","lastTransitionTime":"2025-11-24T07:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.152562 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.250124 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.250185 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.250204 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.250232 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.250260 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:55Z","lastTransitionTime":"2025-11-24T07:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.353941 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.354008 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.354026 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.354058 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.354082 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:55Z","lastTransitionTime":"2025-11-24T07:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.465236 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.465283 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.465303 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.465325 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.465483 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:55Z","lastTransitionTime":"2025-11-24T07:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.570226 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.570300 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.570341 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.570380 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.760542 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:55 crc kubenswrapper[4691]: E1124 07:57:55.760854 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.875601 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:55Z","lastTransitionTime":"2025-11-24T07:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.879231 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.903435 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.933186 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:53Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:53.045562 6118 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:53.045585 6118 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:53.045608 6118 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 07:57:53.045613 6118 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 07:57:53.045643 6118 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:53.045653 6118 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 07:57:53.045650 6118 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 07:57:53.045664 6118 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:53.045668 6118 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 07:57:53.045673 6118 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 07:57:53.045682 6118 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:53.045707 6118 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:53.045707 6118 factory.go:656] Stopping watch factory\\\\nI1124 07:57:53.045730 6118 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.948251 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.963435 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" 
for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.977552 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.979140 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.979192 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.979207 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.979229 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.979241 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:55Z","lastTransitionTime":"2025-11-24T07:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:55 crc kubenswrapper[4691]: I1124 07:57:55.989914 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-24T07:57:55Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.004539 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"reso
urce-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.021487 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.034438 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.051225 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd
302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"
name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"sta
te\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.077367 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" event={"ID":"285fed6b-5793-4f67-8f5a-8bb6bebccfa2","Type":"ContainerStarted","Data":"f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260"} Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.077428 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" event={"ID":"285fed6b-5793-4f67-8f5a-8bb6bebccfa2","Type":"ContainerStarted","Data":"b723b7b6efcaca7c2a4baa3943f45ce5a2e5ead638ca59c8eeb7ed763d7d4d98"} Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.081479 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.081541 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.081553 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.081574 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.081588 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:56Z","lastTransitionTime":"2025-11-24T07:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.184714 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.184809 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.184835 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.184868 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.184889 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:56Z","lastTransitionTime":"2025-11-24T07:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.288116 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.288731 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.288745 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.288766 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.289192 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:56Z","lastTransitionTime":"2025-11-24T07:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.393332 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.393406 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.393432 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.393520 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.393552 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:56Z","lastTransitionTime":"2025-11-24T07:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.496290 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.496352 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.496364 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.496382 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.496395 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:56Z","lastTransitionTime":"2025-11-24T07:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.599758 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.599826 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.599847 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.599877 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.599896 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:56Z","lastTransitionTime":"2025-11-24T07:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.703470 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.703516 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.703527 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.703543 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.703553 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:56Z","lastTransitionTime":"2025-11-24T07:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.760562 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.760618 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:56 crc kubenswrapper[4691]: E1124 07:57:56.760827 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:56 crc kubenswrapper[4691]: E1124 07:57:56.761025 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.806132 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.806175 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.806189 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.806204 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.806215 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:56Z","lastTransitionTime":"2025-11-24T07:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.909285 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.909373 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.909392 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.909426 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.909474 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:56Z","lastTransitionTime":"2025-11-24T07:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.921530 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-98whr"] Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.922334 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:57:56 crc kubenswrapper[4691]: E1124 07:57:56.922489 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.942123 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-re
sources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.959507 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:56 crc kubenswrapper[4691]: I1124 07:57:56.980134 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.003237 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd
302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"
name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"sta
te\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.012206 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.012267 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.012279 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.012298 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.012312 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:57Z","lastTransitionTime":"2025-11-24T07:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.025165 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.033343 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.033477 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvd6g\" (UniqueName: \"kubernetes.io/projected/21147e4f-4335-4c12-9a81-aa333d8301db-kube-api-access-vvd6g\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:57:57 
crc kubenswrapper[4691]: I1124 07:57:57.043651 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.059169 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.106106 4691 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" event={"ID":"285fed6b-5793-4f67-8f5a-8bb6bebccfa2","Type":"ContainerStarted","Data":"8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2"} Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.115268 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.115330 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.115347 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.115375 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.115394 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:57Z","lastTransitionTime":"2025-11-24T07:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.118539 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.134803 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:57:57 crc kubenswrapper[4691]: 
I1124 07:57:57.134892 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvd6g\" (UniqueName: \"kubernetes.io/projected/21147e4f-4335-4c12-9a81-aa333d8301db-kube-api-access-vvd6g\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:57:57 crc kubenswrapper[4691]: E1124 07:57:57.135534 4691 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 07:57:57 crc kubenswrapper[4691]: E1124 07:57:57.135621 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs podName:21147e4f-4335-4c12-9a81-aa333d8301db nodeName:}" failed. No retries permitted until 2025-11-24 07:57:57.63559303 +0000 UTC m=+39.634542299 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs") pod "network-metrics-daemon-98whr" (UID: "21147e4f-4335-4c12-9a81-aa333d8301db") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.147072 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is 
after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.162696 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvd6g\" (UniqueName: \"kubernetes.io/projected/21147e4f-4335-4c12-9a81-aa333d8301db-kube-api-access-vvd6g\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.172162 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.184155 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.203882 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:53Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:53.045562 6118 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:53.045585 6118 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:53.045608 6118 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 07:57:53.045613 6118 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 07:57:53.045643 6118 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:53.045653 6118 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 07:57:53.045650 6118 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 07:57:53.045664 6118 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:53.045668 6118 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 07:57:53.045673 6118 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 07:57:53.045682 6118 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:53.045707 6118 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:53.045707 6118 factory.go:656] Stopping watch factory\\\\nI1124 07:57:53.045730 6118 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.219565 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.219715 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.219754 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.219768 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.219789 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.219808 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:57Z","lastTransitionTime":"2025-11-24T07:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.236042 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.259901 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.275377 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.290032 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.306345 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.322392 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.322471 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.322494 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.322548 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.322564 4691 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:57Z","lastTransitionTime":"2025-11-24T07:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.326019 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.344554 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.362906 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.377770 4691 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.389542 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.410196 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:53Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:53.045562 6118 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:53.045585 6118 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:53.045608 6118 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 07:57:53.045613 6118 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 07:57:53.045643 6118 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:53.045653 6118 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 07:57:53.045650 6118 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 07:57:53.045664 6118 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:53.045668 6118 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 07:57:53.045673 6118 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 07:57:53.045682 6118 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:53.045707 6118 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:53.045707 6118 factory.go:656] Stopping watch factory\\\\nI1124 07:57:53.045730 6118 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.420587 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.424824 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.424879 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.424894 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.424918 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.424933 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:57Z","lastTransitionTime":"2025-11-24T07:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.447724 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.462418 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.476113 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.486224 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.500190 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.514016 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.528056 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.528102 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.528115 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.528133 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.528147 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:57Z","lastTransitionTime":"2025-11-24T07:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.529882 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.547736 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.563862 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:57Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.631550 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.631623 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.631639 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.631666 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.631685 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:57Z","lastTransitionTime":"2025-11-24T07:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.641672 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:57:57 crc kubenswrapper[4691]: E1124 07:57:57.641935 4691 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 07:57:57 crc kubenswrapper[4691]: E1124 07:57:57.642019 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs podName:21147e4f-4335-4c12-9a81-aa333d8301db nodeName:}" failed. No retries permitted until 2025-11-24 07:57:58.641999447 +0000 UTC m=+40.640948696 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs") pod "network-metrics-daemon-98whr" (UID: "21147e4f-4335-4c12-9a81-aa333d8301db") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.735115 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.735185 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.735195 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.735210 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.735223 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:57Z","lastTransitionTime":"2025-11-24T07:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.759909 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:57:57 crc kubenswrapper[4691]: E1124 07:57:57.760151 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.838820 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.838904 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.838923 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.838963 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.838985 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:57Z","lastTransitionTime":"2025-11-24T07:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.942585 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.942680 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.942723 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.942774 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:57 crc kubenswrapper[4691]: I1124 07:57:57.942804 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:57Z","lastTransitionTime":"2025-11-24T07:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.046290 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.046396 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.046419 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.046493 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.046519 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:58Z","lastTransitionTime":"2025-11-24T07:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.149484 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.149536 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.149548 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.149570 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.149583 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:58Z","lastTransitionTime":"2025-11-24T07:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.252933 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.252994 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.253013 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.253040 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.253060 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:58Z","lastTransitionTime":"2025-11-24T07:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.356313 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.356386 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.356409 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.356441 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.356489 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:58Z","lastTransitionTime":"2025-11-24T07:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.460436 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.460531 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.460549 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.460575 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.460596 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:58Z","lastTransitionTime":"2025-11-24T07:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.564815 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.564898 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.564923 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.564957 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.564983 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:58Z","lastTransitionTime":"2025-11-24T07:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.653012 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:57:58 crc kubenswrapper[4691]: E1124 07:57:58.653313 4691 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 07:57:58 crc kubenswrapper[4691]: E1124 07:57:58.653512 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs podName:21147e4f-4335-4c12-9a81-aa333d8301db nodeName:}" failed. No retries permitted until 2025-11-24 07:58:00.653425512 +0000 UTC m=+42.652374931 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs") pod "network-metrics-daemon-98whr" (UID: "21147e4f-4335-4c12-9a81-aa333d8301db") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.669108 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.669149 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.669163 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.669182 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.669197 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:58Z","lastTransitionTime":"2025-11-24T07:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.760374 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.760548 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:57:58 crc kubenswrapper[4691]: E1124 07:57:58.760728 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.760757 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:57:58 crc kubenswrapper[4691]: E1124 07:57:58.761065 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:57:58 crc kubenswrapper[4691]: E1124 07:57:58.761125 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.771884 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.771947 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.771965 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.771986 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.772007 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:58Z","lastTransitionTime":"2025-11-24T07:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.778525 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.797656 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.816235 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.845124 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634
e4ab31a247d12af6c72335ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:53Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:53.045562 6118 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:53.045585 6118 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:53.045608 6118 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 07:57:53.045613 6118 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 07:57:53.045643 6118 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:53.045653 6118 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 07:57:53.045650 6118 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 07:57:53.045664 6118 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:53.045668 6118 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 07:57:53.045673 6118 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 07:57:53.045682 6118 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:53.045707 6118 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:53.045707 6118 factory.go:656] Stopping watch factory\\\\nI1124 07:57:53.045730 6118 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.864169 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.873981 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.874045 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.874072 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.874108 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.874136 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:58Z","lastTransitionTime":"2025-11-24T07:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.892991 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.913723 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.931866 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.945666 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.965568 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.976507 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.976562 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.976578 4691 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.976601 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.976616 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:58Z","lastTransitionTime":"2025-11-24T07:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:57:58 crc kubenswrapper[4691]: I1124 07:57:58.988544 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.006643 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.023414 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.044380 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.067074 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.080045 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.080408 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.080676 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.080824 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.080977 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:59Z","lastTransitionTime":"2025-11-24T07:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.087997 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.105918 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
[condensed: remainder of a multi-line kubelet status-update payload for pod "openshift-multus"/"multus-additional-cni-plugins-zw5l9"; the CNI init containers cni-plugins, bond-cni-plugin, routeoverride-cni, whereabouts-cni-bincopy and whereabouts-cni all report state terminated with exitCode 0 / reason Completed (finishedAt between 2025-11-24T07:57:44Z and 2025-11-24T07:57:48Z); phase Running; podIP 192.168.126.11; startTime 2025-11-24T07:57:42Z]
The kubelet could not write that status for pod "openshift-multus"/"multus-additional-cni-plugins-zw5l9": Internal error occurred: failed calling webhook "pod.network-node-identity.openshift.io": failed to call webhook: Post "https://127.0.0.1:9743/pod?timeout=10s": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:57:59Z is after 2025-08-24T17:21:41Z
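The x509 failure above is independently checkable: the webhook's serving certificate (valid only until 2025-08-24T17:21:41Z) can be read straight off the listener named in the Post URL. A minimal sketch, not part of the log, assuming Python 3 with the third-party cryptography package (>= 42 for the *_utc accessors) and that it runs on the node; if the server insists on a client certificate the handshake may fail, in which case openssl s_client -connect 127.0.0.1:9743 shows the same dates.

import socket
import ssl

from cryptography import x509

# Endpoint taken from the Post URL in the webhook error above.
HOST, PORT = "127.0.0.1", 9743

# We only want to read the certificate, not validate it (it is expired),
# so disable hostname checking and verification.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

with socket.create_connection((HOST, PORT), timeout=5) as sock:
    with ctx.wrap_socket(sock, server_hostname=HOST) as tls:
        der = tls.getpeercert(binary_form=True)  # DER-encoded server cert

cert = x509.load_der_x509_certificate(der)
print("subject:  ", cert.subject.rfc4514_string())
print("notBefore:", cert.not_valid_before_utc)
print("notAfter: ", cert.not_valid_after_utc)  # expect 2025-08-24 17:21:41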
Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.185027 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.185104 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.185123 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.185154 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.185175 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:57:59Z","lastTransitionTime":"2025-11-24T07:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[the identical five-record block (NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady, "Node became not ready") repeats roughly every 100 ms, at 07:57:59.288, .393, .496, .601 and .705]
Nov 24 07:57:59 crc kubenswrapper[4691]: I1124 07:57:59.760179 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 07:57:59 crc kubenswrapper[4691]: E1124 07:57:59.760305 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
[the NodeNotReady status block repeats at 07:57:59.809, .913 and 07:58:00.017]
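Every NotReady heartbeat above cites the same root cause: no CNI configuration file in /etc/kubernetes/cni/net.d/. The condition clears once the network provider (here, ovn-kubernetes via multus) drops a .conf or .conflist into that directory, so a manual version of the kubelet's check is handy while watching the node recover. A short sketch under the assumption that it runs on the node itself; only the directory path comes from the log, the rest is illustrative.

import json
import pathlib

# Directory the kubelet names in the NotReady message above.
CNI_DIR = pathlib.Path("/etc/kubernetes/cni/net.d")

if not CNI_DIR.is_dir():
    raise SystemExit(f"{CNI_DIR} does not exist")

# CNI configs are single-plugin .conf files or .conflist plugin chains.
configs = sorted(p for p in CNI_DIR.iterdir()
                 if p.suffix in (".conf", ".conflist", ".json"))
if not configs:
    print(f"no CNI configuration file in {CNI_DIR} (matches the NotReady reason)")
for p in configs:
    with p.open() as f:
        doc = json.load(f)
    plugins = doc.get("type") or [pl.get("type") for pl in doc.get("plugins", [])]
    print(f"{p.name}: name={doc.get('name')} plugins={plugins}")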
Nov 24 07:58:00 crc kubenswrapper[4691]: I1124 07:58:00.098556 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c"
Nov 24 07:58:00 crc kubenswrapper[4691]: I1124 07:58:00.099416 4691 scope.go:117] "RemoveContainer" containerID="ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea"
Nov 24 07:58:00 crc kubenswrapper[4691]: E1124 07:58:00.099612 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248"
[the NodeNotReady status block repeats at 07:58:00.119, .222, .325, .428 and .532]
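The back-off 10s in the ovnkube-controller record is the first step of the kubelet's CrashLoopBackOff schedule: per the Kubernetes documentation the restart delay starts at 10s, doubles on every subsequent crash, is capped at five minutes, and resets after the container runs for ten minutes without failing. A toy sketch of that schedule (illustrative arithmetic, not kubelet source):

# CrashLoopBackOff delay schedule: 10s base, doubling, 5-minute cap
# (documented kubelet defaults; this is illustrative, not kubelet code).
BASE_S, CAP_S = 10, 300

delay = BASE_S
for restart in range(1, 9):
    print(f"restart {restart}: back-off {delay}s")
    delay = min(delay * 2, CAP_S)  # 10, 20, 40, 80, 160, 300, 300, ...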
[the NodeNotReady status block repeats at 07:58:00.636]
Nov 24 07:58:00 crc kubenswrapper[4691]: I1124 07:58:00.678121 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:00 crc kubenswrapper[4691]: E1124 07:58:00.678277 4691 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 07:58:00 crc kubenswrapper[4691]: E1124 07:58:00.678340 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs podName:21147e4f-4335-4c12-9a81-aa333d8301db nodeName:}" failed. No retries permitted until 2025-11-24 07:58:04.678324478 +0000 UTC m=+46.677273717 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs") pod "network-metrics-daemon-98whr" (UID: "21147e4f-4335-4c12-9a81-aa333d8301db") : object "openshift-multus"/"metrics-daemon-secret" not registered
[the NodeNotReady status block repeats at 07:58:00.738; at 07:58:00.759-.765 "No sandbox for pod can be found. Need to start a new one" and matching "Error syncing pod, skipping" records appear for network-check-source-55646444c4-trplf (podUID 9d751cbb-f2e2-430d-9754-c882a5e924a5), networking-console-plugin-85b44fc459-gdk6g (podUID 5fe485a1-e14f-4c09-b5b9-f252bc42b7e8) and network-metrics-daemon-98whr (podUID 21147e4f-4335-4c12-9a81-aa333d8301db), all citing the missing CNI configuration]
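By this point the journal is a handful of distinct faults repeating under different timestamps: the expired webhook certificate, the missing CNI configuration, the unregistered metrics-daemon-secret, and the resulting CrashLoopBackOff. A throwaway triage helper makes that visible by grouping E-level records; it assumes the err="..." key=value format shown above, including its escaped inner quotes, and is not an official tool. Feed it the journal on stdin, e.g. python3 triage.py < kubelet.log.

import collections
import re
import sys

# Match kubelet E-level records and capture the err="..." payload,
# tolerating escaped quotes as in the CrashLoopBackOff record above.
ERR = re.compile(r'\bE\d{4} .*?err="((?:[^"\\]|\\.)*)"')

counts = collections.Counter()
for line in sys.stdin:
    m = ERR.search(line)
    if m:
        counts[m.group(1)[:120]] += 1  # truncate long messages for display

# Most frequent failure messages first.
for msg, n in counts.most_common():
    print(f"{n:6d}  {msg}")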
[the NodeNotReady status block repeats at 07:58:00.841, .946 and at 07:58:01.050, .154, .257, .361, .465 and .568]
[the NodeNotReady status block repeats at 07:58:01.672; the network-check-target-xd92c "No sandbox for pod can be found" / "Error syncing pod, skipping" pair recurs at 07:58:01.760; the status block repeats again at 07:58:01.775, .880, .985 and at 07:58:02.089, .193, .297, .400, .504, .607 and .709]
[at 07:58:02.759-.760 the "No sandbox for pod can be found" / "Error syncing pod, skipping" pairs recur for networking-console-plugin-85b44fc459-gdk6g, network-check-source-55646444c4-trplf and network-metrics-daemon-98whr; the NodeNotReady status block repeats at 07:58:02.813]
[the NodeNotReady status block repeats at 07:58:02.916 and at 07:58:03.020, .124, .227, .331, .435, .538, .641 and .745, every heartbeat still reporting reason KubeletNotReady with the same missing-CNI-config message; the log continues in this pattern]
Has your network provider started?"} Nov 24 07:58:03 crc kubenswrapper[4691]: I1124 07:58:03.760442 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:03 crc kubenswrapper[4691]: E1124 07:58:03.760679 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:03 crc kubenswrapper[4691]: I1124 07:58:03.849721 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:03 crc kubenswrapper[4691]: I1124 07:58:03.849826 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:03 crc kubenswrapper[4691]: I1124 07:58:03.849855 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:03 crc kubenswrapper[4691]: I1124 07:58:03.849889 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:03 crc kubenswrapper[4691]: I1124 07:58:03.849928 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:03Z","lastTransitionTime":"2025-11-24T07:58:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:03 crc kubenswrapper[4691]: I1124 07:58:03.953258 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:03 crc kubenswrapper[4691]: I1124 07:58:03.953313 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:03 crc kubenswrapper[4691]: I1124 07:58:03.953325 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:03 crc kubenswrapper[4691]: I1124 07:58:03.953345 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:03 crc kubenswrapper[4691]: I1124 07:58:03.953361 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:03Z","lastTransitionTime":"2025-11-24T07:58:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.056705 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.056801 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.056820 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.056849 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.056869 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.159024 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.159066 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.159075 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.159088 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.159097 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.262254 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.262317 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.262331 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.262350 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.262364 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.366632 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.366709 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.366726 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.366755 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.366777 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.470787 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.470884 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.470904 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.470935 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.470956 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.574637 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.574698 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.574708 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.574726 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.574738 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.678257 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.678530 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.678544 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.678565 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.678579 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.729916 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:04 crc kubenswrapper[4691]: E1124 07:58:04.730183 4691 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 07:58:04 crc kubenswrapper[4691]: E1124 07:58:04.730296 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs podName:21147e4f-4335-4c12-9a81-aa333d8301db nodeName:}" failed. No retries permitted until 2025-11-24 07:58:12.73026985 +0000 UTC m=+54.729219119 (durationBeforeRetry 8s). 
Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.738907 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.738950 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.738959 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.738975 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.738985 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:04 crc kubenswrapper[4691]: E1124 07:58:04.751321 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:04Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.755703 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.755734 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.755746 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.755763 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.755774 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.759708 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.759726 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.759800 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:04 crc kubenswrapper[4691]: E1124 07:58:04.759928 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:04 crc kubenswrapper[4691]: E1124 07:58:04.760092 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:04 crc kubenswrapper[4691]: E1124 07:58:04.760365 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:04 crc kubenswrapper[4691]: E1124 07:58:04.768815 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:04Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.773867 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.773900 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.773909 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.773922 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.773932 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: E1124 07:58:04.785824 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:04Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.789642 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.789686 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.789701 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.789723 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.789733 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: E1124 07:58:04.801181 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:04Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.804864 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.804911 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.804921 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.804937 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.804948 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: E1124 07:58:04.816695 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:04Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:04 crc kubenswrapper[4691]: E1124 07:58:04.816812 4691 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.818710 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.818749 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.818759 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.818775 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.818787 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.921171 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.921217 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.921227 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.921250 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:04 crc kubenswrapper[4691]: I1124 07:58:04.921261 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:04Z","lastTransitionTime":"2025-11-24T07:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.023807 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.023839 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.023847 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.023859 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.023883 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:05Z","lastTransitionTime":"2025-11-24T07:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.126308 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.126350 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.126359 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.126376 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.126386 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:05Z","lastTransitionTime":"2025-11-24T07:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.229216 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.229272 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.229288 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.229304 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.229316 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:05Z","lastTransitionTime":"2025-11-24T07:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.332224 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.332259 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.332269 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.332282 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.332309 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:05Z","lastTransitionTime":"2025-11-24T07:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.435648 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.435756 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.435780 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.435835 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.435858 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:05Z","lastTransitionTime":"2025-11-24T07:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.540314 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.540396 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.540425 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.540505 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.540534 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:05Z","lastTransitionTime":"2025-11-24T07:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.643833 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.643895 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.643908 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.643930 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.643944 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:05Z","lastTransitionTime":"2025-11-24T07:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.747398 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.747489 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.747509 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.747536 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.747554 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:05Z","lastTransitionTime":"2025-11-24T07:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.759948 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:05 crc kubenswrapper[4691]: E1124 07:58:05.760127 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.851547 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.851645 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.851670 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.851705 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.851732 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:05Z","lastTransitionTime":"2025-11-24T07:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.955872 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.955937 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.955950 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.955972 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:05 crc kubenswrapper[4691]: I1124 07:58:05.955986 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:05Z","lastTransitionTime":"2025-11-24T07:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.059909 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.059990 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.060015 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.060091 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.060112 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:06Z","lastTransitionTime":"2025-11-24T07:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.162711 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.162752 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.162762 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.162778 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.162794 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:06Z","lastTransitionTime":"2025-11-24T07:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.266004 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.266054 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.266065 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.266084 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.266097 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:06Z","lastTransitionTime":"2025-11-24T07:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.370147 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.370228 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.370248 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.370278 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.370300 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:06Z","lastTransitionTime":"2025-11-24T07:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.473625 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.473684 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.473697 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.473718 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.473732 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:06Z","lastTransitionTime":"2025-11-24T07:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.579356 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.579496 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.579550 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.579595 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.579624 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:06Z","lastTransitionTime":"2025-11-24T07:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.683608 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.683683 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.683702 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.683731 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.683752 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:06Z","lastTransitionTime":"2025-11-24T07:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.760086 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:06 crc kubenswrapper[4691]: E1124 07:58:06.760346 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.760761 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.761070 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:06 crc kubenswrapper[4691]: E1124 07:58:06.761072 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:06 crc kubenswrapper[4691]: E1124 07:58:06.761314 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.786698 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.786746 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.786756 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.786772 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.786783 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:06Z","lastTransitionTime":"2025-11-24T07:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.889066 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.889111 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.889123 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.889138 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.889151 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:06Z","lastTransitionTime":"2025-11-24T07:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.992420 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.992494 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.992505 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.992533 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:06 crc kubenswrapper[4691]: I1124 07:58:06.992551 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:06Z","lastTransitionTime":"2025-11-24T07:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.096115 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.096157 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.096167 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.096186 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.096198 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:07Z","lastTransitionTime":"2025-11-24T07:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.200354 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.200500 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.200544 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.200581 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.200635 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:07Z","lastTransitionTime":"2025-11-24T07:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.304162 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.304195 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.304204 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.304218 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.304229 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:07Z","lastTransitionTime":"2025-11-24T07:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.406890 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.407385 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.407496 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.407567 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.407650 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:07Z","lastTransitionTime":"2025-11-24T07:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.510849 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.510910 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.510924 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.510949 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.510966 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:07Z","lastTransitionTime":"2025-11-24T07:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.614647 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.614703 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.614719 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.614742 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.614762 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:07Z","lastTransitionTime":"2025-11-24T07:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.717926 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.718007 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.718025 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.718047 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.718059 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:07Z","lastTransitionTime":"2025-11-24T07:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.759845 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:07 crc kubenswrapper[4691]: E1124 07:58:07.760114 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.821682 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.821760 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.821793 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.821813 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.821826 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:07Z","lastTransitionTime":"2025-11-24T07:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.924865 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.924929 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.924947 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.924968 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:07 crc kubenswrapper[4691]: I1124 07:58:07.924984 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:07Z","lastTransitionTime":"2025-11-24T07:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.027679 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.027747 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.027757 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.027777 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.027789 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:08Z","lastTransitionTime":"2025-11-24T07:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.130094 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.130151 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.130160 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.130175 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.130184 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:08Z","lastTransitionTime":"2025-11-24T07:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.234493 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.234562 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.234583 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.234613 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.234630 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:08Z","lastTransitionTime":"2025-11-24T07:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.337226 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.337277 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.337294 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.337310 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.337321 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:08Z","lastTransitionTime":"2025-11-24T07:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.440263 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.440302 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.440313 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.440329 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.440340 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:08Z","lastTransitionTime":"2025-11-24T07:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.542585 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.542667 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.542687 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.542720 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.542758 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:08Z","lastTransitionTime":"2025-11-24T07:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.646072 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.646122 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.646132 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.646150 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.646163 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:08Z","lastTransitionTime":"2025-11-24T07:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.749766 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.749805 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.749814 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.749828 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.749839 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:08Z","lastTransitionTime":"2025-11-24T07:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.760510 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.760678 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:08 crc kubenswrapper[4691]: E1124 07:58:08.760767 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:08 crc kubenswrapper[4691]: E1124 07:58:08.760677 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.760813 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:08 crc kubenswrapper[4691]: E1124 07:58:08.760930 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.779063 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\
\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0
7b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.790559 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.800345 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.809223 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.827554 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:53Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:53.045562 6118 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:53.045585 6118 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:53.045608 6118 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 07:57:53.045613 6118 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 07:57:53.045643 6118 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:53.045653 6118 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 07:57:53.045650 6118 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 07:57:53.045664 6118 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:53.045668 6118 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 07:57:53.045673 6118 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 07:57:53.045682 6118 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:53.045707 6118 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:53.045707 6118 factory.go:656] Stopping watch factory\\\\nI1124 07:57:53.045730 6118 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.838910 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.852409 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.852463 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.852475 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.852494 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.852504 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:08Z","lastTransitionTime":"2025-11-24T07:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.853753 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.871340 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.882307 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 
07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.892119 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.903774 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.916274 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.928131 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.941304 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd
302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"
name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"sta
te\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.955174 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.955218 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.955227 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.955243 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.955252 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:08Z","lastTransitionTime":"2025-11-24T07:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.955509 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.967156 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:08 crc kubenswrapper[4691]: I1124 07:58:08.976804 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:08Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.057496 4691 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.057539 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.057548 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.057563 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.057572 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:09Z","lastTransitionTime":"2025-11-24T07:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.159526 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.159588 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.159599 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.159615 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.159626 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:09Z","lastTransitionTime":"2025-11-24T07:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.261800 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.261863 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.261874 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.261889 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.261899 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:09Z","lastTransitionTime":"2025-11-24T07:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.364542 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.364575 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.364583 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.364596 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.364606 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:09Z","lastTransitionTime":"2025-11-24T07:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.467840 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.467904 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.467928 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.467958 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.467982 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:09Z","lastTransitionTime":"2025-11-24T07:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.570646 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.570686 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.570694 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.570723 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.570734 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:09Z","lastTransitionTime":"2025-11-24T07:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.585325 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.585543 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 07:58:41.585522499 +0000 UTC m=+83.584471748 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.585626 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.585768 4691 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.585812 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:58:41.585805406 +0000 UTC m=+83.584754655 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.686180 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.686271 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.686295 4691 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.686343 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.686381 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:58:41.686351413 +0000 UTC m=+83.685300752 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.686468 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.686487 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.686498 4691 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.686538 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 07:58:41.686522468 +0000 UTC m=+83.685471817 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.686551 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.686577 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.686595 4691 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.686673 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 07:58:41.686651312 +0000 UTC m=+83.685600591 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 24 07:58:09 crc kubenswrapper[4691]: I1124 07:58:09.759772 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 07:58:09 crc kubenswrapper[4691]: E1124 07:58:09.759911 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 07:58:10 crc kubenswrapper[4691]: I1124 07:58:10.759642 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 07:58:10 crc kubenswrapper[4691]: I1124 07:58:10.759699 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:10 crc kubenswrapper[4691]: I1124 07:58:10.759707 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 07:58:10 crc kubenswrapper[4691]: E1124 07:58:10.759883 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 07:58:10 crc kubenswrapper[4691]: E1124 07:58:10.760066 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db"
Nov 24 07:58:10 crc kubenswrapper[4691]: E1124 07:58:10.760117 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 07:58:11 crc kubenswrapper[4691]: I1124 07:58:11.760187 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 07:58:11 crc kubenswrapper[4691]: E1124 07:58:11.760283 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 07:58:12 crc kubenswrapper[4691]: I1124 07:58:12.760423 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:12 crc kubenswrapper[4691]: I1124 07:58:12.760546 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 07:58:12 crc kubenswrapper[4691]: E1124 07:58:12.760618 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db"
Nov 24 07:58:12 crc kubenswrapper[4691]: E1124 07:58:12.760712 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 07:58:12 crc kubenswrapper[4691]: I1124 07:58:12.760772 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 07:58:12 crc kubenswrapper[4691]: E1124 07:58:12.760819 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 07:58:12 crc kubenswrapper[4691]: I1124 07:58:12.761485 4691 scope.go:117] "RemoveContainer" containerID="ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea"
Nov 24 07:58:12 crc kubenswrapper[4691]: I1124 07:58:12.830685 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:12 crc kubenswrapper[4691]: E1124 07:58:12.831050 4691 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 07:58:12 crc kubenswrapper[4691]: E1124 07:58:12.831110 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs podName:21147e4f-4335-4c12-9a81-aa333d8301db nodeName:}" failed. No retries permitted until 2025-11-24 07:58:28.831094651 +0000 UTC m=+70.830043900 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs") pod "network-metrics-daemon-98whr" (UID: "21147e4f-4335-4c12-9a81-aa333d8301db") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.176616 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/1.log"
Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.181319 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e"}
Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.181490 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.181569 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.181578 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.181592 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.181600 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:13Z","lastTransitionTime":"2025-11-24T07:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.181910 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.207321 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.237258 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.264486 4691 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d710147
67776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.281970 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.284098 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.284127 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.284135 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.284152 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.284162 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:13Z","lastTransitionTime":"2025-11-24T07:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.295547 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.314080 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:53Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:53.045562 6118 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:53.045585 6118 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:53.045608 6118 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 07:57:53.045613 6118 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 07:57:53.045643 6118 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:53.045653 6118 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 07:57:53.045650 6118 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 07:57:53.045664 6118 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:53.045668 6118 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 07:57:53.045673 6118 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 07:57:53.045682 6118 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:53.045707 6118 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:53.045707 6118 factory.go:656] Stopping watch factory\\\\nI1124 07:57:53.045730 6118 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.326656 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.349331 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.364822 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.379390 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.1
68.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.386358 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.386394 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.386406 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.386422 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.386433 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:13Z","lastTransitionTime":"2025-11-24T07:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.393577 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.408798 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.420842 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.435619 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.451640 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.451919 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.463517 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.465332 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha
256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.479953 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.489473 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.489527 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.489539 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.489562 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.489576 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:13Z","lastTransitionTime":"2025-11-24T07:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.498080 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.514511 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.527229 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.536669 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.564131 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:53Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:53.045562 6118 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:53.045585 6118 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:53.045608 6118 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 07:57:53.045613 6118 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 07:57:53.045643 6118 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:53.045653 6118 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 07:57:53.045650 6118 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 07:57:53.045664 6118 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:53.045668 6118 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 07:57:53.045673 6118 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 07:57:53.045682 6118 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:53.045707 6118 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:53.045707 6118 factory.go:656] Stopping watch factory\\\\nI1124 07:57:53.045730 6118 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.575635 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.588496 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.592074 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.592125 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.592139 4691 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.592161 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.592176 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:13Z","lastTransitionTime":"2025-11-24T07:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.600858 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.612144 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.625683 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.640461 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.657014 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.671268 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.686802 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd
302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"
name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"sta
te\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.695322 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.695375 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.695390 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.695408 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.695421 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:13Z","lastTransitionTime":"2025-11-24T07:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.700198 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.711956 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ace2c8b4-5923-49d7-b19a-8778bf4a4f99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.725644 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.739374 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:13Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.759778 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:13 crc kubenswrapper[4691]: E1124 07:58:13.759884 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.802829 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.802896 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.802907 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.802927 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.802938 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:13Z","lastTransitionTime":"2025-11-24T07:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.906220 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.906295 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.906307 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.906324 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:13 crc kubenswrapper[4691]: I1124 07:58:13.906334 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:13Z","lastTransitionTime":"2025-11-24T07:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.009559 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.009599 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.009608 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.009622 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.009632 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:14Z","lastTransitionTime":"2025-11-24T07:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.111905 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.111999 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.112017 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.112043 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.112064 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:14Z","lastTransitionTime":"2025-11-24T07:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.186224 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/2.log" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.187150 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/1.log" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.191029 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e" exitCode=1 Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.191091 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e"} Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.191206 4691 scope.go:117] "RemoveContainer" containerID="ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.192205 4691 scope.go:117] "RemoveContainer" containerID="cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e" Nov 24 07:58:14 crc kubenswrapper[4691]: E1124 07:58:14.192550 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.215726 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.215787 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.215815 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.215846 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.215872 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:14Z","lastTransitionTime":"2025-11-24T07:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.215877 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.234358 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.251862 4691 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d710147
67776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.269597 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ace2c8b4-5923-49d7-b19a-8778bf4a4f99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.282200 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.305734 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics
-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff52c2a30eba958adc42ad51a8be59aa9a48a634e4ab31a247d12af6c72335ea\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:57:53Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1124 07:57:53.045562 6118 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1124 07:57:53.045585 6118 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1124 07:57:53.045608 6118 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1124 07:57:53.045613 6118 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1124 07:57:53.045643 6118 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1124 07:57:53.045653 6118 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1124 07:57:53.045650 6118 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1124 07:57:53.045664 6118 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1124 07:57:53.045668 6118 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1124 07:57:53.045673 6118 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1124 07:57:53.045682 6118 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1124 07:57:53.045707 6118 handler.go:208] Removed *v1.Node event handler 2\\\\nI1124 07:57:53.045707 6118 factory.go:656] Stopping watch factory\\\\nI1124 07:57:53.045730 6118 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"message\\\":\\\"il\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659249 6383 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Router_Static_Route Row:map[ip_prefix:10.217.0.0/22 nexthop:100.64.0.2 policy:{GoSet:[src-ip]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8944024f-deb7-4076-afb3-4b50a2ff4b4b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:static_routes Mutator:insert Value:{GoSet:[{GoUUID:8944024f-deb7-4076-afb3-4b50a2ff4b4b}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f6d604c1-9711-4e25-be6c-79ec28bbad1b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659514 6383 obj_retry.go:551] Creating *factory.egressNode crc took: 2.050517ms\\\\nI1124 07:58:13.659530 6383 factory.go:1336] Added *v1.Node event handler 7\\\\nI1124 07:58:13.659549 6383 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1124 07:58:13.659747 6383 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 07:58:13.659815 6383 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 07:58:13.659845 6383 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:58:13.659871 6383 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 07:58:13.659939 6383 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.319724 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.319758 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.319769 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.319788 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.319800 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:14Z","lastTransitionTime":"2025-11-24T07:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.319988 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.340323 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.361091 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.379394 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.394794 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.412802 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.422840 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.423060 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.423201 4691 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.423320 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.423401 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:14Z","lastTransitionTime":"2025-11-24T07:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.432970 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.446075 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.463581 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\
\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df93
96e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.478776 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":
{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.493054 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.510290 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:14Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.527014 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.527066 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.527076 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.527094 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.527106 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:14Z","lastTransitionTime":"2025-11-24T07:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.630167 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.630224 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.630235 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.630255 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.630267 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:14Z","lastTransitionTime":"2025-11-24T07:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.733699 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.734684 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.734730 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.734759 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.734784 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:14Z","lastTransitionTime":"2025-11-24T07:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.760130 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.760246 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:14 crc kubenswrapper[4691]: E1124 07:58:14.760317 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:14 crc kubenswrapper[4691]: E1124 07:58:14.760505 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.760597 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:14 crc kubenswrapper[4691]: E1124 07:58:14.760714 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.838479 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.838545 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.838565 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.838591 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.838610 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:14Z","lastTransitionTime":"2025-11-24T07:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.943137 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.943210 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.943229 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.943261 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:14 crc kubenswrapper[4691]: I1124 07:58:14.943322 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:14Z","lastTransitionTime":"2025-11-24T07:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.047090 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.047156 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.047174 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.047207 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.047228 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.150671 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.150758 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.150792 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.150828 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.150849 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.199359 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/2.log" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.205546 4691 scope.go:117] "RemoveContainer" containerID="cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e" Nov 24 07:58:15 crc kubenswrapper[4691]: E1124 07:58:15.205838 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.211112 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.211192 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.211220 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.211260 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.211283 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.229884 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: E1124 07:58:15.233688 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.240938 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.241008 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.241026 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.241052 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.241072 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.253342 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: E1124 07:58:15.262655 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.268960 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.269039 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.269054 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.269078 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.269093 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.273875 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.290230 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc 
kubenswrapper[4691]: E1124 07:58:15.290567 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider 
started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d
34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 
24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.296110 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.296168 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.296195 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.296228 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.296251 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.306909 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"
ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: E1124 07:58:15.308414 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.312009 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.312029 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.312038 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.312050 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.312059 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.322029 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: E1124 07:58:15.326072 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5
253ba1a-9775-49a3-ac2c-46321419cc02\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: E1124 07:58:15.326226 4691 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.328093 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.328127 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.328147 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.328164 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.328176 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.353791 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.387747 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.406324 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11
-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.423200 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e
6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.431013 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.431054 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.431066 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.431086 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.431099 4691 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.437016 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ace2c8b4-5923-49d7-b19a-8778bf4a4f99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.451545 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.471533 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5a
eaa7ff1c56c967b75c21733e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"message\\\":\\\"il\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659249 6383 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Router_Static_Route Row:map[ip_prefix:10.217.0.0/22 nexthop:100.64.0.2 policy:{GoSet:[src-ip]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8944024f-deb7-4076-afb3-4b50a2ff4b4b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:static_routes Mutator:insert Value:{GoSet:[{GoUUID:8944024f-deb7-4076-afb3-4b50a2ff4b4b}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f6d604c1-9711-4e25-be6c-79ec28bbad1b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659514 6383 obj_retry.go:551] Creating *factory.egressNode crc took: 2.050517ms\\\\nI1124 07:58:13.659530 6383 factory.go:1336] Added *v1.Node event handler 7\\\\nI1124 07:58:13.659549 6383 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1124 07:58:13.659747 6383 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 07:58:13.659815 6383 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 07:58:13.659845 6383 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:58:13.659871 6383 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 07:58:13.659939 6383 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.487007 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.518593 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"co
ntainerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071
f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.533823 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.534123 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.534198 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.534267 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.534336 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.537740 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.558055 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.573883 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:15Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.637165 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.637478 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.637555 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.637668 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.637766 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.740365 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.740412 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.740421 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.740437 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.740475 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.759724 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:15 crc kubenswrapper[4691]: E1124 07:58:15.759877 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.844196 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.844637 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.844676 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.844696 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.844710 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.947479 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.947524 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.947538 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.947557 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:15 crc kubenswrapper[4691]: I1124 07:58:15.947574 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:15Z","lastTransitionTime":"2025-11-24T07:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.054391 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.054461 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.054474 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.054495 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.054508 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:16Z","lastTransitionTime":"2025-11-24T07:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.156581 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.156652 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.156663 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.156684 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.156696 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:16Z","lastTransitionTime":"2025-11-24T07:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.260197 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.260286 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.260307 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.260343 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.260366 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:16Z","lastTransitionTime":"2025-11-24T07:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.362730 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.362773 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.362783 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.362800 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.362811 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:16Z","lastTransitionTime":"2025-11-24T07:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.465558 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.465628 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.465647 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.465673 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.465688 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:16Z","lastTransitionTime":"2025-11-24T07:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.568601 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.568678 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.568699 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.568730 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.568759 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:16Z","lastTransitionTime":"2025-11-24T07:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.672014 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.672382 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.672572 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.672732 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.672916 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:16Z","lastTransitionTime":"2025-11-24T07:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.760193 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.760193 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:16 crc kubenswrapper[4691]: E1124 07:58:16.760515 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.760583 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:16 crc kubenswrapper[4691]: E1124 07:58:16.760837 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:16 crc kubenswrapper[4691]: E1124 07:58:16.760933 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.775536 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.775626 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.775661 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.775697 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.775722 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:16Z","lastTransitionTime":"2025-11-24T07:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.878396 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.878470 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.878485 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.878505 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.878517 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:16Z","lastTransitionTime":"2025-11-24T07:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.982745 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.983089 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.983170 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.983237 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:16 crc kubenswrapper[4691]: I1124 07:58:16.983293 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:16Z","lastTransitionTime":"2025-11-24T07:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.085802 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.086156 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.086248 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.086334 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.086419 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:17Z","lastTransitionTime":"2025-11-24T07:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.188666 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.188719 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.188734 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.188757 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.188772 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:17Z","lastTransitionTime":"2025-11-24T07:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.291241 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.291313 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.291328 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.291346 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.291376 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:17Z","lastTransitionTime":"2025-11-24T07:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.394441 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.394492 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.394507 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.394524 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.394533 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:17Z","lastTransitionTime":"2025-11-24T07:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.497531 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.497601 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.497610 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.497628 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.497640 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:17Z","lastTransitionTime":"2025-11-24T07:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.600367 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.600486 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.600504 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.600526 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.600537 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:17Z","lastTransitionTime":"2025-11-24T07:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.704128 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.704197 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.704217 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.704252 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.704276 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:17Z","lastTransitionTime":"2025-11-24T07:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.759735 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:17 crc kubenswrapper[4691]: E1124 07:58:17.759922 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.807086 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.807134 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.807144 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.807164 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.807175 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:17Z","lastTransitionTime":"2025-11-24T07:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.909845 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.909899 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.909909 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.909926 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:17 crc kubenswrapper[4691]: I1124 07:58:17.909935 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:17Z","lastTransitionTime":"2025-11-24T07:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.012383 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.012429 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.012460 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.012475 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.012485 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:18Z","lastTransitionTime":"2025-11-24T07:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.115694 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.116114 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.116320 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.116489 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.116664 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:18Z","lastTransitionTime":"2025-11-24T07:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.219272 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.219342 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.219355 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.219371 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.219381 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:18Z","lastTransitionTime":"2025-11-24T07:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.321730 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.321783 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.321792 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.321808 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.321818 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:18Z","lastTransitionTime":"2025-11-24T07:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.424982 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.425045 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.425063 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.425090 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.425107 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:18Z","lastTransitionTime":"2025-11-24T07:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.534872 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.534959 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.534986 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.535020 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.535054 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:18Z","lastTransitionTime":"2025-11-24T07:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.638745 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.638840 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.638859 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.638887 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.638906 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:18Z","lastTransitionTime":"2025-11-24T07:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.742071 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.742130 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.742143 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.742166 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.742179 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:18Z","lastTransitionTime":"2025-11-24T07:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.759732 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.759770 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:18 crc kubenswrapper[4691]: E1124 07:58:18.760040 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.760076 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:18 crc kubenswrapper[4691]: E1124 07:58:18.760313 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:18 crc kubenswrapper[4691]: E1124 07:58:18.760394 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.775273 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.790485 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.802666 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.815017 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 
07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.829185 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.844385 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 
07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.844415 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.844424 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.844438 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.844469 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:18Z","lastTransitionTime":"2025-11-24T07:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.848684 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\
\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.868104 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.892288 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.912481 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.926085 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.945484 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"res
tartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" 
name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.947159 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.947340 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.947415 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" 
Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.947576 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.947660 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:18Z","lastTransitionTime":"2025-11-24T07:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.961209 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ace2c8b4-5923-49d7-b19a-8778bf4a4f99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5
b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:18 crc kubenswrapper[4691]: I1124 07:58:18.977114 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:18Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.010417 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"message\\\":\\\"il\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659249 6383 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Router_Static_Route Row:map[ip_prefix:10.217.0.0/22 nexthop:100.64.0.2 policy:{GoSet:[src-ip]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8944024f-deb7-4076-afb3-4b50a2ff4b4b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:static_routes Mutator:insert Value:{GoSet:[{GoUUID:8944024f-deb7-4076-afb3-4b50a2ff4b4b}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f6d604c1-9711-4e25-be6c-79ec28bbad1b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659514 6383 obj_retry.go:551] Creating *factory.egressNode crc took: 2.050517ms\\\\nI1124 07:58:13.659530 6383 factory.go:1336] Added *v1.Node event handler 7\\\\nI1124 07:58:13.659549 6383 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1124 07:58:13.659747 6383 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 07:58:13.659815 6383 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 07:58:13.659845 6383 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:58:13.659871 6383 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 07:58:13.659939 6383 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:19Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.026125 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:19Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.052502 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.052578 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.052594 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.052617 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.052632 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:19Z","lastTransitionTime":"2025-11-24T07:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.053031 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:19Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.070462 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:19Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.087313 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:19Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.156052 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.156107 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.156119 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.156141 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.156154 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:19Z","lastTransitionTime":"2025-11-24T07:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.759804 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 07:58:19 crc kubenswrapper[4691]: E1124 07:58:19.759989 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.783655 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.783714 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.783728 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.783760 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.783780 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:19Z","lastTransitionTime":"2025-11-24T07:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.886305 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.886351 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.886363 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.886384 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:19 crc kubenswrapper[4691]: I1124 07:58:19.886396 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:19Z","lastTransitionTime":"2025-11-24T07:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 07:58:20 crc kubenswrapper[4691]: I1124 07:58:20.760430 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:20 crc kubenswrapper[4691]: I1124 07:58:20.760536 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 07:58:20 crc kubenswrapper[4691]: E1124 07:58:20.760654 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db"
Nov 24 07:58:20 crc kubenswrapper[4691]: I1124 07:58:20.760731 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 07:58:20 crc kubenswrapper[4691]: E1124 07:58:20.760926 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 07:58:20 crc kubenswrapper[4691]: E1124 07:58:20.761096 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 07:58:21 crc kubenswrapper[4691]: I1124 07:58:21.760274 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 07:58:21 crc kubenswrapper[4691]: E1124 07:58:21.760572 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:21 crc kubenswrapper[4691]: I1124 07:58:21.852557 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:21 crc kubenswrapper[4691]: I1124 07:58:21.852612 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:21 crc kubenswrapper[4691]: I1124 07:58:21.852625 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:21 crc kubenswrapper[4691]: I1124 07:58:21.852650 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:21 crc kubenswrapper[4691]: I1124 07:58:21.852666 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:21Z","lastTransitionTime":"2025-11-24T07:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:21 crc kubenswrapper[4691]: I1124 07:58:21.955690 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:21 crc kubenswrapper[4691]: I1124 07:58:21.955737 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:21 crc kubenswrapper[4691]: I1124 07:58:21.955747 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:21 crc kubenswrapper[4691]: I1124 07:58:21.955764 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:21 crc kubenswrapper[4691]: I1124 07:58:21.955774 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:21Z","lastTransitionTime":"2025-11-24T07:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.760346 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 07:58:22 crc kubenswrapper[4691]: E1124 07:58:22.760525 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.760763 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 07:58:22 crc kubenswrapper[4691]: E1124 07:58:22.760828 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.761152 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:22 crc kubenswrapper[4691]: E1124 07:58:22.761462 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db"
pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.788091 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.788199 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.788226 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.788252 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.788266 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:22Z","lastTransitionTime":"2025-11-24T07:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.891491 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.891558 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.891572 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.891595 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.891610 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:22Z","lastTransitionTime":"2025-11-24T07:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.994577 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.994664 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.994682 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.994711 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:22 crc kubenswrapper[4691]: I1124 07:58:22.994729 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:22Z","lastTransitionTime":"2025-11-24T07:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.097801 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.098154 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.098234 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.098338 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.098437 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:23Z","lastTransitionTime":"2025-11-24T07:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.202168 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.202636 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.202761 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.202866 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.202975 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:23Z","lastTransitionTime":"2025-11-24T07:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.305793 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.305867 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.305876 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.305894 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.305903 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:23Z","lastTransitionTime":"2025-11-24T07:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.409408 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.409469 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.409482 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.409505 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.409516 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:23Z","lastTransitionTime":"2025-11-24T07:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.511702 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.511979 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.512212 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.512362 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.512534 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:23Z","lastTransitionTime":"2025-11-24T07:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.615520 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.615566 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.615578 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.615596 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.615611 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:23Z","lastTransitionTime":"2025-11-24T07:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.718126 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.718184 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.718220 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.718242 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.718257 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:23Z","lastTransitionTime":"2025-11-24T07:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.760422 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:23 crc kubenswrapper[4691]: E1124 07:58:23.760580 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.820785 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.820830 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.820842 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.820859 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.820869 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:23Z","lastTransitionTime":"2025-11-24T07:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.923912 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.924005 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.924033 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.924066 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:23 crc kubenswrapper[4691]: I1124 07:58:23.924090 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:23Z","lastTransitionTime":"2025-11-24T07:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.026586 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.026634 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.026645 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.026663 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.026677 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:24Z","lastTransitionTime":"2025-11-24T07:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.128861 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.129186 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.129198 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.129214 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.129225 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:24Z","lastTransitionTime":"2025-11-24T07:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.232167 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.232204 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.232214 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.232230 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.232240 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:24Z","lastTransitionTime":"2025-11-24T07:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.335413 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.335507 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.335527 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.335556 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.335573 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:24Z","lastTransitionTime":"2025-11-24T07:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.438335 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.438387 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.438418 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.438434 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.438456 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:24Z","lastTransitionTime":"2025-11-24T07:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.541684 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.541737 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.541749 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.541767 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.541783 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:24Z","lastTransitionTime":"2025-11-24T07:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.644626 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.644683 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.644694 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.644711 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.644723 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:24Z","lastTransitionTime":"2025-11-24T07:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.747778 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.747813 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.747822 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.747837 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.747847 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:24Z","lastTransitionTime":"2025-11-24T07:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.760219 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.760238 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.760340 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:24 crc kubenswrapper[4691]: E1124 07:58:24.760524 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:24 crc kubenswrapper[4691]: E1124 07:58:24.760680 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:24 crc kubenswrapper[4691]: E1124 07:58:24.760833 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.849883 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.849931 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.849968 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.850003 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.850016 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:24Z","lastTransitionTime":"2025-11-24T07:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.953676 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.953740 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.953753 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.953803 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:24 crc kubenswrapper[4691]: I1124 07:58:24.953821 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:24Z","lastTransitionTime":"2025-11-24T07:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.057372 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.057434 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.057477 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.057510 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.057531 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.160626 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.160709 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.160719 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.160738 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.160751 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.263246 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.263294 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.263307 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.263328 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.263343 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.365787 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.365836 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.365847 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.365865 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.365877 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.468821 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.468872 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.468881 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.468906 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.468918 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.488059 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.488106 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.488118 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.488138 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.488149 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: E1124 07:58:25.511687 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:25Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.517281 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.517337 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.517351 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.517377 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.517400 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: E1124 07:58:25.531290 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:25Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.535860 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.535901 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.535915 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.535937 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.535953 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: E1124 07:58:25.551832 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:25Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.557330 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.557389 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.557401 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.557422 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.557439 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: E1124 07:58:25.583233 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:25Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.589074 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.589162 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.589174 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.589220 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.589237 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: E1124 07:58:25.603328 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:25Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:25 crc kubenswrapper[4691]: E1124 07:58:25.603497 4691 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.605220 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.605276 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.605289 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.605307 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.605318 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.707801 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.707841 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.707850 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.707869 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.707880 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.759513 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:25 crc kubenswrapper[4691]: E1124 07:58:25.759702 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.810019 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.810065 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.810075 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.810091 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.810101 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.912908 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.912982 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.912994 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.913018 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:25 crc kubenswrapper[4691]: I1124 07:58:25.913038 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:25Z","lastTransitionTime":"2025-11-24T07:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.022179 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.022240 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.022252 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.022272 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.022336 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:26Z","lastTransitionTime":"2025-11-24T07:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.125258 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.125353 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.125370 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.125822 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.125848 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:26Z","lastTransitionTime":"2025-11-24T07:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.229347 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.229393 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.229403 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.229420 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.229431 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:26Z","lastTransitionTime":"2025-11-24T07:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.332081 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.332145 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.332164 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.332188 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.332203 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:26Z","lastTransitionTime":"2025-11-24T07:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.435475 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.435538 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.435551 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.435573 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.435586 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:26Z","lastTransitionTime":"2025-11-24T07:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.538532 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.538580 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.538594 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.538618 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.538635 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:26Z","lastTransitionTime":"2025-11-24T07:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.662512 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.662569 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.662581 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.662606 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.662621 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:26Z","lastTransitionTime":"2025-11-24T07:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.760334 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.760425 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.760441 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:26 crc kubenswrapper[4691]: E1124 07:58:26.760568 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:26 crc kubenswrapper[4691]: E1124 07:58:26.760624 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:26 crc kubenswrapper[4691]: E1124 07:58:26.760696 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.765064 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.765104 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.765124 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.765144 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.765162 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:26Z","lastTransitionTime":"2025-11-24T07:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.868221 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.868276 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.868290 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.868312 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.868325 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:26Z","lastTransitionTime":"2025-11-24T07:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.970914 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.970964 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.970980 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.971000 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:26 crc kubenswrapper[4691]: I1124 07:58:26.971011 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:26Z","lastTransitionTime":"2025-11-24T07:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.074338 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.074390 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.074401 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.074420 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.074436 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:27Z","lastTransitionTime":"2025-11-24T07:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.177351 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.177407 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.177425 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.177482 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.177501 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:27Z","lastTransitionTime":"2025-11-24T07:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.286590 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.286655 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.286669 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.286690 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.286704 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:27Z","lastTransitionTime":"2025-11-24T07:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.390045 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.390078 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.390109 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.390124 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.390133 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:27Z","lastTransitionTime":"2025-11-24T07:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.493027 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.493078 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.493094 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.493113 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.493129 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:27Z","lastTransitionTime":"2025-11-24T07:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.596045 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.596096 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.596106 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.596125 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.596135 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:27Z","lastTransitionTime":"2025-11-24T07:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.698722 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.698764 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.698792 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.698806 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.698815 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:27Z","lastTransitionTime":"2025-11-24T07:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.759468 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 07:58:27 crc kubenswrapper[4691]: E1124 07:58:27.759604 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.802104 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.802139 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.802148 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.802163 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.802174 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:27Z","lastTransitionTime":"2025-11-24T07:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.905244 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.905314 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.905324 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.905344 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:27 crc kubenswrapper[4691]: I1124 07:58:27.905355 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:27Z","lastTransitionTime":"2025-11-24T07:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.008034 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.008087 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.008096 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.008114 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.008123 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:28Z","lastTransitionTime":"2025-11-24T07:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.111059 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.111091 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.111099 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.111116 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.111125 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:28Z","lastTransitionTime":"2025-11-24T07:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.214504 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.214555 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.214565 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.214586 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.214598 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:28Z","lastTransitionTime":"2025-11-24T07:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.318112 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.318269 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.318297 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.318353 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.318373 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:28Z","lastTransitionTime":"2025-11-24T07:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.421155 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.421305 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.421330 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.421363 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.421389 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:28Z","lastTransitionTime":"2025-11-24T07:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.524544 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.524659 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.524712 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.524744 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.524843 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:28Z","lastTransitionTime":"2025-11-24T07:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.628426 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.628506 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.628521 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.628542 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.628558 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:28Z","lastTransitionTime":"2025-11-24T07:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.731732 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.731808 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.731817 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.731853 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.731864 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:28Z","lastTransitionTime":"2025-11-24T07:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.759915 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.759986 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:28 crc kubenswrapper[4691]: E1124 07:58:28.760122 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 07:58:28 crc kubenswrapper[4691]: E1124 07:58:28.760363 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.760902 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 07:58:28 crc kubenswrapper[4691]: E1124 07:58:28.761037 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.781184 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.794497 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ace2c8b4-5923-49d7-b19a-8778bf4a4f99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.808211 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.820173 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.835305 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.839403 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.839481 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.839492 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.839511 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.839523 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:28Z","lastTransitionTime":"2025-11-24T07:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.839692 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:28 crc kubenswrapper[4691]: E1124 07:58:28.839854 4691 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 07:58:28 crc kubenswrapper[4691]: E1124 07:58:28.839929 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs podName:21147e4f-4335-4c12-9a81-aa333d8301db nodeName:}" failed. No retries permitted until 2025-11-24 07:59:00.839906741 +0000 UTC m=+102.838855990 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs") pod "network-metrics-daemon-98whr" (UID: "21147e4f-4335-4c12-9a81-aa333d8301db") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.849698 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.861783 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.887973 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"message\\\":\\\"il\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659249 6383 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Router_Static_Route Row:map[ip_prefix:10.217.0.0/22 nexthop:100.64.0.2 policy:{GoSet:[src-ip]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8944024f-deb7-4076-afb3-4b50a2ff4b4b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:static_routes Mutator:insert Value:{GoSet:[{GoUUID:8944024f-deb7-4076-afb3-4b50a2ff4b4b}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f6d604c1-9711-4e25-be6c-79ec28bbad1b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659514 6383 obj_retry.go:551] Creating *factory.egressNode crc took: 2.050517ms\\\\nI1124 07:58:13.659530 6383 factory.go:1336] Added *v1.Node event handler 7\\\\nI1124 07:58:13.659549 6383 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1124 07:58:13.659747 6383 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 07:58:13.659815 6383 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 07:58:13.659845 6383 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:58:13.659871 6383 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 07:58:13.659939 6383 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.902294 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.932231 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"co
ntainerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071
f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.944490 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.944548 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.944561 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.944586 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.944599 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:28Z","lastTransitionTime":"2025-11-24T07:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.948235 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.961894 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z" Nov 24 
07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.977635 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:28 crc kubenswrapper[4691]: I1124 07:58:28.996402 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:28Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.019932 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:29Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.044776 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:29Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.047278 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.047312 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.047324 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.047343 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.047355 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:29Z","lastTransitionTime":"2025-11-24T07:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.074497 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:
57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:29Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.097217 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b33
5e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:29Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.150644 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.150709 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.150727 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.150751 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:29 crc 
kubenswrapper[4691]: I1124 07:58:29.150766 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:29Z","lastTransitionTime":"2025-11-24T07:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.253354 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.253414 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.253426 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.253458 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.253477 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:29Z","lastTransitionTime":"2025-11-24T07:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.356505 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.356569 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.356584 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.356607 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.356618 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:29Z","lastTransitionTime":"2025-11-24T07:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.460174 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.460236 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.460250 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.460271 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.460286 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:29Z","lastTransitionTime":"2025-11-24T07:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.563576 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.563651 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.563668 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.563727 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.563742 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:29Z","lastTransitionTime":"2025-11-24T07:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.672111 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.672162 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.672175 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.672195 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.672213 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:29Z","lastTransitionTime":"2025-11-24T07:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.760235 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:29 crc kubenswrapper[4691]: E1124 07:58:29.760523 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.761481 4691 scope.go:117] "RemoveContainer" containerID="cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e" Nov 24 07:58:29 crc kubenswrapper[4691]: E1124 07:58:29.761876 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.775107 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.775218 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.775249 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.775277 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.775300 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:29Z","lastTransitionTime":"2025-11-24T07:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.879161 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.879652 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.879740 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.879834 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.880382 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:29Z","lastTransitionTime":"2025-11-24T07:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.986896 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.986943 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.986953 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.986972 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:29 crc kubenswrapper[4691]: I1124 07:58:29.986982 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:29Z","lastTransitionTime":"2025-11-24T07:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.092864 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.092910 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.092920 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.092937 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.092947 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:30Z","lastTransitionTime":"2025-11-24T07:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.196149 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.196220 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.196240 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.196270 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.196290 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:30Z","lastTransitionTime":"2025-11-24T07:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.259629 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gxxrf_b2332a73-f85c-470c-9209-c5e5cd1bc3a1/kube-multus/0.log" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.259858 4691 generic.go:334] "Generic (PLEG): container finished" podID="b2332a73-f85c-470c-9209-c5e5cd1bc3a1" containerID="ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703" exitCode=1 Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.259955 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gxxrf" event={"ID":"b2332a73-f85c-470c-9209-c5e5cd1bc3a1","Type":"ContainerDied","Data":"ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703"} Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.260415 4691 scope.go:117] "RemoveContainer" containerID="ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.280589 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:30Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:29Z\\\",\\\"message\\\":\\\"2025-11-24T07:57:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f\\\\n2025-11-24T07:57:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f to /host/opt/cni/bin/\\\\n2025-11-24T07:57:44Z [verbose] multus-daemon started\\\\n2025-11-24T07:57:44Z [verbose] Readiness Indicator file check\\\\n2025-11-24T07:58:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.298808 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.298850 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.298860 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.298880 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.298891 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:30Z","lastTransitionTime":"2025-11-24T07:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.299469 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11
-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bd
bc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.315233 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.334191 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.357750 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ace2c8b4-5923-49d7-b19a-8778bf4a4f99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.373307 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.389007 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.402569 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.402634 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.402653 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.402694 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.402716 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:30Z","lastTransitionTime":"2025-11-24T07:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.406016 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.429417 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.443913 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.468010 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"message\\\":\\\"il\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659249 6383 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Router_Static_Route Row:map[ip_prefix:10.217.0.0/22 nexthop:100.64.0.2 policy:{GoSet:[src-ip]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8944024f-deb7-4076-afb3-4b50a2ff4b4b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:static_routes Mutator:insert Value:{GoSet:[{GoUUID:8944024f-deb7-4076-afb3-4b50a2ff4b4b}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f6d604c1-9711-4e25-be6c-79ec28bbad1b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659514 6383 obj_retry.go:551] Creating *factory.egressNode crc took: 2.050517ms\\\\nI1124 07:58:13.659530 6383 factory.go:1336] Added *v1.Node event handler 7\\\\nI1124 07:58:13.659549 6383 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1124 07:58:13.659747 6383 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 07:58:13.659815 6383 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 07:58:13.659845 6383 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:58:13.659871 6383 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 07:58:13.659939 6383 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.486336 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.506169 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.506261 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.506288 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.506325 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.506349 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:30Z","lastTransitionTime":"2025-11-24T07:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.527706 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.545594 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.561671 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 
07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.575309 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.591265 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.606467 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:30Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.610131 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.610192 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.610207 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.610226 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.610239 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:30Z","lastTransitionTime":"2025-11-24T07:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.713376 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.713467 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.713488 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.713510 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.713524 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:30Z","lastTransitionTime":"2025-11-24T07:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.760431 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.760536 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.760468 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 07:58:30 crc kubenswrapper[4691]: E1124 07:58:30.760685 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 24 07:58:30 crc kubenswrapper[4691]: E1124 07:58:30.760806 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db"
Nov 24 07:58:30 crc kubenswrapper[4691]: E1124 07:58:30.760935 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.816866 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.816926 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.816937 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.816958 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.816973 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:30Z","lastTransitionTime":"2025-11-24T07:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.919672 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.919706 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.919714 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.919728 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:30 crc kubenswrapper[4691]: I1124 07:58:30.919738 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:30Z","lastTransitionTime":"2025-11-24T07:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.022246 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.022285 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.022294 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.022311 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.022321 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:31Z","lastTransitionTime":"2025-11-24T07:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.125280 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.125323 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.125332 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.125349 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.125362 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:31Z","lastTransitionTime":"2025-11-24T07:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.233879 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.233945 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.233962 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.233984 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.234002 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:31Z","lastTransitionTime":"2025-11-24T07:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.271657 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gxxrf_b2332a73-f85c-470c-9209-c5e5cd1bc3a1/kube-multus/0.log" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.271727 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gxxrf" event={"ID":"b2332a73-f85c-470c-9209-c5e5cd1bc3a1","Type":"ContainerStarted","Data":"d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa"} Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.298785 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\
\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.317662 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ace2c8b4-5923-49d7-b19a-8778bf4a4f99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.332260 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.338810 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.338875 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.338889 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.338912 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.338925 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:31Z","lastTransitionTime":"2025-11-24T07:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.346149 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.356081 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.383102 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.404864 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.417073 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.426307 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.442206 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.442271 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.442289 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.442316 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.442336 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:31Z","lastTransitionTime":"2025-11-24T07:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.447124 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"message\\\":\\\"il\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659249 6383 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Router_Static_Route Row:map[ip_prefix:10.217.0.0/22 nexthop:100.64.0.2 policy:{GoSet:[src-ip]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8944024f-deb7-4076-afb3-4b50a2ff4b4b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:static_routes Mutator:insert Value:{GoSet:[{GoUUID:8944024f-deb7-4076-afb3-4b50a2ff4b4b}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f6d604c1-9711-4e25-be6c-79ec28bbad1b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659514 6383 obj_retry.go:551] Creating *factory.egressNode crc took: 2.050517ms\\\\nI1124 07:58:13.659530 6383 factory.go:1336] Added *v1.Node event handler 7\\\\nI1124 07:58:13.659549 6383 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1124 07:58:13.659747 6383 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 07:58:13.659815 6383 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 07:58:13.659845 6383 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:58:13.659871 6383 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 07:58:13.659939 6383 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"r
ecursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.466586 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.481031 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.492938 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 
07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.504658 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.519281 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.531539 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.543710 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:29Z\\\",\\\"message\\\":\\\"2025-11-24T07:57:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f\\\\n2025-11-24T07:57:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f to /host/opt/cni/bin/\\\\n2025-11-24T07:57:44Z [verbose] multus-daemon started\\\\n2025-11-24T07:57:44Z [verbose] Readiness Indicator file check\\\\n2025-11-24T07:58:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:58:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.545086 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.545120 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.545131 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.545148 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.545160 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:31Z","lastTransitionTime":"2025-11-24T07:58:31Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.559705 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-24T07:58:31Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.648013 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.648069 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.648085 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.648108 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.648123 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:31Z","lastTransitionTime":"2025-11-24T07:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.750759 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.750790 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.750800 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.750815 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.750825 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:31Z","lastTransitionTime":"2025-11-24T07:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.759783 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:31 crc kubenswrapper[4691]: E1124 07:58:31.759913 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.853130 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.853169 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.853178 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.853194 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.853204 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:31Z","lastTransitionTime":"2025-11-24T07:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.955834 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.955872 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.955880 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.955896 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:31 crc kubenswrapper[4691]: I1124 07:58:31.955904 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:31Z","lastTransitionTime":"2025-11-24T07:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.060079 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.060121 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.060134 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.060153 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.060165 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:32Z","lastTransitionTime":"2025-11-24T07:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.163228 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.163290 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.163311 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.163333 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.163350 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:32Z","lastTransitionTime":"2025-11-24T07:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.266615 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.266673 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.266690 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.266710 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.266724 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:32Z","lastTransitionTime":"2025-11-24T07:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.369707 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.369770 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.369787 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.369809 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.369828 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:32Z","lastTransitionTime":"2025-11-24T07:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.473332 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.473396 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.473413 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.473495 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.473522 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:32Z","lastTransitionTime":"2025-11-24T07:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.576580 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.576629 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.576642 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.576663 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.576677 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:32Z","lastTransitionTime":"2025-11-24T07:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.679828 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.679892 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.679901 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.679918 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.679931 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:32Z","lastTransitionTime":"2025-11-24T07:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.759879 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.759888 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:32 crc kubenswrapper[4691]: E1124 07:58:32.760035 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:32 crc kubenswrapper[4691]: E1124 07:58:32.760116 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.759908 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:32 crc kubenswrapper[4691]: E1124 07:58:32.760196 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.782628 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.782673 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.782686 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.782702 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.782712 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:32Z","lastTransitionTime":"2025-11-24T07:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.886569 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.886608 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.886619 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.886637 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.886649 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:32Z","lastTransitionTime":"2025-11-24T07:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.993382 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.993540 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.994029 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.994067 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:32 crc kubenswrapper[4691]: I1124 07:58:32.994081 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:32Z","lastTransitionTime":"2025-11-24T07:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.096412 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.096505 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.096523 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.096546 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.096562 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:33Z","lastTransitionTime":"2025-11-24T07:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.200153 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.200206 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.200218 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.200237 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.200253 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:33Z","lastTransitionTime":"2025-11-24T07:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.304027 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.304084 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.304097 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.304116 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.304128 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:33Z","lastTransitionTime":"2025-11-24T07:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.408150 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.408689 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.408709 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.408737 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.408758 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:33Z","lastTransitionTime":"2025-11-24T07:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.513413 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.513550 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.513579 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.513612 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.513636 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:33Z","lastTransitionTime":"2025-11-24T07:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.616581 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.616654 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.616670 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.616704 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.616726 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:33Z","lastTransitionTime":"2025-11-24T07:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.719990 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.720064 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.720081 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.720111 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.720138 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:33Z","lastTransitionTime":"2025-11-24T07:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.759975 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:33 crc kubenswrapper[4691]: E1124 07:58:33.760193 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.825497 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.825590 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.825644 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.825672 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.825691 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:33Z","lastTransitionTime":"2025-11-24T07:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.927916 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.927976 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.927993 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.928019 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:33 crc kubenswrapper[4691]: I1124 07:58:33.928042 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:33Z","lastTransitionTime":"2025-11-24T07:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.031724 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.031850 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.031887 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.031927 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.031954 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:34Z","lastTransitionTime":"2025-11-24T07:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.135623 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.135695 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.135713 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.135743 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.135765 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:34Z","lastTransitionTime":"2025-11-24T07:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.239881 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.239956 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.239976 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.240005 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.240026 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:34Z","lastTransitionTime":"2025-11-24T07:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.343522 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.343614 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.343640 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.343676 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.343701 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:34Z","lastTransitionTime":"2025-11-24T07:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.446874 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.446952 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.446970 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.446997 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.447018 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:34Z","lastTransitionTime":"2025-11-24T07:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.549951 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.550033 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.550058 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.550088 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.550111 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:34Z","lastTransitionTime":"2025-11-24T07:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.653348 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.653413 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.653430 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.653483 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.653502 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:34Z","lastTransitionTime":"2025-11-24T07:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.758911 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.758990 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.759005 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.759036 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.759059 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:34Z","lastTransitionTime":"2025-11-24T07:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.760330 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:34 crc kubenswrapper[4691]: E1124 07:58:34.760468 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.760691 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.760833 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:34 crc kubenswrapper[4691]: E1124 07:58:34.761142 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:34 crc kubenswrapper[4691]: E1124 07:58:34.761405 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.863866 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.864513 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.864680 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.865297 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.865519 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:34Z","lastTransitionTime":"2025-11-24T07:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.969149 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.969477 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.969579 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.969669 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:34 crc kubenswrapper[4691]: I1124 07:58:34.969753 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:34Z","lastTransitionTime":"2025-11-24T07:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.072990 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.073053 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.073062 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.073082 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.073095 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.176230 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.176593 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.176621 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.176644 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.176658 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.279912 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.279996 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.280020 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.280051 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.280074 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.384401 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.384516 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.384543 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.384624 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.384653 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.488687 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.488756 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.488779 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.488810 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.488834 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.592003 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.592543 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.592636 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.592892 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.592957 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.696886 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.697160 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.697325 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.697436 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.697547 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.760027 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:35 crc kubenswrapper[4691]: E1124 07:58:35.760277 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.801607 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.801675 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.801696 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.801724 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.801762 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.906387 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.906509 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.906570 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.906599 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.906620 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.932818 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.932893 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.932913 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.932943 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.932966 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: E1124 07:58:35.956886 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:35Z is after 2025-08-24T17:21:41Z"
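The patch failure above points past the kubelet itself: the node-identity webhook at https://127.0.0.1:9743 is serving a certificate that expired on 2025-08-24T17:21:41Z, so every node-status patch is rejected. A sketch of how one might confirm the expiry from the node, assuming the endpoint is reachable; the probe is illustrative and not part of any OpenShift tooling:

package main

import (
    "crypto/tls"
    "fmt"
    "time"
)

func main() {
    // Endpoint taken from the webhook URL in the error above.
    addr := "127.0.0.1:9743"
    // Skip verification deliberately: the goal is to inspect the expired
    // certificate, not to fail on it the way the kubelet's client did.
    conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
    if err != nil {
        fmt.Printf("dial %s: %v\n", addr, err)
        return
    }
    defer conn.Close()
    cert := conn.ConnectionState().PeerCertificates[0]
    fmt.Printf("subject:  %s\n", cert.Subject)
    fmt.Printf("notAfter: %s\n", cert.NotAfter.Format(time.RFC3339))
    if time.Now().After(cert.NotAfter) {
        fmt.Println("certificate has expired, matching the x509 error in the log")
    }
}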
event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.963177 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.963207 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.963226 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:35 crc kubenswrapper[4691]: E1124 07:58:35.986298 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:35Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.992224 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.992272 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.992291 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.992319 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:35 crc kubenswrapper[4691]: I1124 07:58:35.992340 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:35Z","lastTransitionTime":"2025-11-24T07:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:36 crc kubenswrapper[4691]: E1124 07:58:36.016159 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:36Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.021717 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.021757 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.021767 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.021785 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.021797 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:36Z","lastTransitionTime":"2025-11-24T07:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:36 crc kubenswrapper[4691]: E1124 07:58:36.039142 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:36Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.044464 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.044518 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.044541 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.044567 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.044585 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:36Z","lastTransitionTime":"2025-11-24T07:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:36 crc kubenswrapper[4691]: E1124 07:58:36.067346 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:36Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:36 crc kubenswrapper[4691]: E1124 07:58:36.067523 4691 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.069981 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.070011 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.070026 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.070047 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.070060 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:36Z","lastTransitionTime":"2025-11-24T07:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.174156 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.174236 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.174262 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.174293 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.174318 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:36Z","lastTransitionTime":"2025-11-24T07:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.278100 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.278657 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.278821 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.278996 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.279219 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:36Z","lastTransitionTime":"2025-11-24T07:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.382181 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.382234 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.382246 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.382267 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.382281 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:36Z","lastTransitionTime":"2025-11-24T07:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.485625 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.485705 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.485728 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.485761 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.485782 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:36Z","lastTransitionTime":"2025-11-24T07:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.588848 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.588918 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.588935 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.588962 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.589080 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:36Z","lastTransitionTime":"2025-11-24T07:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.693092 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.693193 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.693249 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.693282 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.693307 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:36Z","lastTransitionTime":"2025-11-24T07:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.760262 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.760344 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.760262 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:36 crc kubenswrapper[4691]: E1124 07:58:36.760555 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:36 crc kubenswrapper[4691]: E1124 07:58:36.760662 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:36 crc kubenswrapper[4691]: E1124 07:58:36.760820 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.797073 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.797150 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.797190 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.797226 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.797253 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:36Z","lastTransitionTime":"2025-11-24T07:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.900377 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.900464 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.900480 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.900502 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:36 crc kubenswrapper[4691]: I1124 07:58:36.900515 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:36Z","lastTransitionTime":"2025-11-24T07:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.003289 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.003644 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.003687 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.003718 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.003742 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:37Z","lastTransitionTime":"2025-11-24T07:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.106484 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.106583 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.106608 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.106641 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.106661 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:37Z","lastTransitionTime":"2025-11-24T07:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.210785 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.210853 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.210871 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.210894 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.210913 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:37Z","lastTransitionTime":"2025-11-24T07:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.314109 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.314153 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.314163 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.314181 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.314192 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:37Z","lastTransitionTime":"2025-11-24T07:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.417397 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.417438 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.417465 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.417483 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.417494 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:37Z","lastTransitionTime":"2025-11-24T07:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.520904 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.520940 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.520948 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.520963 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.520973 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:37Z","lastTransitionTime":"2025-11-24T07:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.624946 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.625034 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.625061 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.625097 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.625131 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:37Z","lastTransitionTime":"2025-11-24T07:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.729649 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.729717 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.729741 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.729775 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.729796 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:37Z","lastTransitionTime":"2025-11-24T07:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.759644 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:37 crc kubenswrapper[4691]: E1124 07:58:37.760017 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.777156 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.832254 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.832304 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.832315 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.832337 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.832355 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:37Z","lastTransitionTime":"2025-11-24T07:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.935356 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.935415 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.935436 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.935550 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:37 crc kubenswrapper[4691]: I1124 07:58:37.935575 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:37Z","lastTransitionTime":"2025-11-24T07:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.038940 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.039009 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.039031 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.039060 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.039077 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:38Z","lastTransitionTime":"2025-11-24T07:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.142150 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.142201 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.142232 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.142251 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.142263 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:38Z","lastTransitionTime":"2025-11-24T07:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.245235 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.245291 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.245303 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.245323 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.245337 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:38Z","lastTransitionTime":"2025-11-24T07:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.347823 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.347892 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.347909 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.347932 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.347950 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:38Z","lastTransitionTime":"2025-11-24T07:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.451322 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.451378 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.451388 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.451407 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.451419 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:38Z","lastTransitionTime":"2025-11-24T07:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.555061 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.555141 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.555165 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.555197 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.555221 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:38Z","lastTransitionTime":"2025-11-24T07:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.659534 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.659606 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.659630 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.659678 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.659703 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:38Z","lastTransitionTime":"2025-11-24T07:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.760273 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.760441 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.760636 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:38 crc kubenswrapper[4691]: E1124 07:58:38.760679 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:38 crc kubenswrapper[4691]: E1124 07:58:38.760744 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:38 crc kubenswrapper[4691]: E1124 07:58:38.760805 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.762990 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.763059 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.763078 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.763103 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.763120 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:38Z","lastTransitionTime":"2025-11-24T07:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.779953 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.797014 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.808392 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.818948 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.833018 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.846822 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.860802 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:29Z\\\",\\\"message\\\":\\\"2025-11-24T07:57:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f\\\\n2025-11-24T07:57:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f to /host/opt/cni/bin/\\\\n2025-11-24T07:57:44Z [verbose] multus-daemon started\\\\n2025-11-24T07:57:44Z [verbose] Readiness Indicator file check\\\\n2025-11-24T07:58:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:58:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.865607 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.865650 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.865662 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.865680 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.865694 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:38Z","lastTransitionTime":"2025-11-24T07:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.878335 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.889385 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132a7288-f75c-4408-9e2b-aefc5f524bea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://952741e34a2f95082426fc2d094c5f68671d32a477997392462ca0c54c0686d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2040089a4ee1b2fc3f3f46bf7840d8d71b563a282616da13b2b3cf8e3007f931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2040089a4ee1b2fc3f3f46bf7840d8d71b563a282616da13b2b3cf8e3007f931\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z"
Nov 24 07:58:38 crc kubenswrapper[4691]: I1124
07:58:38.901774 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.914532 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ace2c8b4-5923-49d7-b19a-8778bf4a4f99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.926887 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.939818 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.965688 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount
\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"co
ntainerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.968952 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.968981 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.968993 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.969011 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.969024 4691 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:38Z","lastTransitionTime":"2025-11-24T07:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.981235 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:38 crc kubenswrapper[4691]: I1124 07:58:38.995476 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:38Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.011104 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:39Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.040427 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"message\\\":\\\"il\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659249 6383 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Router_Static_Route Row:map[ip_prefix:10.217.0.0/22 nexthop:100.64.0.2 policy:{GoSet:[src-ip]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8944024f-deb7-4076-afb3-4b50a2ff4b4b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:static_routes Mutator:insert Value:{GoSet:[{GoUUID:8944024f-deb7-4076-afb3-4b50a2ff4b4b}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f6d604c1-9711-4e25-be6c-79ec28bbad1b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659514 6383 obj_retry.go:551] Creating *factory.egressNode crc took: 2.050517ms\\\\nI1124 07:58:13.659530 6383 factory.go:1336] Added *v1.Node event handler 7\\\\nI1124 07:58:13.659549 6383 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1124 07:58:13.659747 6383 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 07:58:13.659815 6383 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 07:58:13.659845 6383 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:58:13.659871 6383 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 07:58:13.659939 6383 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:39Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.052365 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:39Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.070790 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.070952 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.071026 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.071103 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.071174 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:39Z","lastTransitionTime":"2025-11-24T07:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.173506 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.173801 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.173864 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.173927 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.173990 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:39Z","lastTransitionTime":"2025-11-24T07:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.276734 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.276768 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.276776 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.276790 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.276798 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:39Z","lastTransitionTime":"2025-11-24T07:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.379410 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.379439 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.379466 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.379481 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.379490 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:39Z","lastTransitionTime":"2025-11-24T07:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.482000 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.482064 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.482082 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.482108 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.482125 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:39Z","lastTransitionTime":"2025-11-24T07:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.584892 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.584934 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.584946 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.584965 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.584977 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:39Z","lastTransitionTime":"2025-11-24T07:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.687147 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.687186 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.687197 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.687214 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.687224 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:39Z","lastTransitionTime":"2025-11-24T07:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.759511 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:39 crc kubenswrapper[4691]: E1124 07:58:39.759682 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.790558 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.790606 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.790619 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.790640 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.790654 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:39Z","lastTransitionTime":"2025-11-24T07:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.893820 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.893860 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.893872 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.893889 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.893903 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:39Z","lastTransitionTime":"2025-11-24T07:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.997116 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.997178 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.997194 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.997222 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:39 crc kubenswrapper[4691]: I1124 07:58:39.997238 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:39Z","lastTransitionTime":"2025-11-24T07:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.100886 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.100956 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.100979 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.101017 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.101038 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:40Z","lastTransitionTime":"2025-11-24T07:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.205575 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.205636 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.205655 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.205680 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.205698 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:40Z","lastTransitionTime":"2025-11-24T07:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.308096 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.308164 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.308202 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.308239 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.308263 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:40Z","lastTransitionTime":"2025-11-24T07:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.411967 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.412108 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.412130 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.412153 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.412218 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:40Z","lastTransitionTime":"2025-11-24T07:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.514764 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.514835 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.514862 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.514910 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.514931 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:40Z","lastTransitionTime":"2025-11-24T07:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.618438 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.618528 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.618545 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.618568 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.618586 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:40Z","lastTransitionTime":"2025-11-24T07:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.721670 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.721798 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.721820 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.722288 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.724511 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:40Z","lastTransitionTime":"2025-11-24T07:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.760484 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:40 crc kubenswrapper[4691]: E1124 07:58:40.760654 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.760781 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.760951 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:40 crc kubenswrapper[4691]: E1124 07:58:40.761071 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:40 crc kubenswrapper[4691]: E1124 07:58:40.761197 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.827565 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.827638 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.827651 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.827672 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.827685 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:40Z","lastTransitionTime":"2025-11-24T07:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.931386 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.931477 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.931498 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.931529 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:40 crc kubenswrapper[4691]: I1124 07:58:40.931551 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:40Z","lastTransitionTime":"2025-11-24T07:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.035490 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.035550 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.035567 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.035589 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.035600 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:41Z","lastTransitionTime":"2025-11-24T07:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.138891 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.138959 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.138980 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.139008 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.139027 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:41Z","lastTransitionTime":"2025-11-24T07:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.242748 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.242821 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.242836 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.242855 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.242871 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:41Z","lastTransitionTime":"2025-11-24T07:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.345819 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.345875 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.345885 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.345905 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.345916 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:41Z","lastTransitionTime":"2025-11-24T07:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.448669 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.448781 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.448802 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.448835 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.448857 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:41Z","lastTransitionTime":"2025-11-24T07:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.552609 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.552680 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.552704 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.552738 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.552762 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:41Z","lastTransitionTime":"2025-11-24T07:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.603338 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.603550 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 07:59:45.603509027 +0000 UTC m=+147.602458316 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.603657 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.603927 4691 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.604011 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:59:45.603990851 +0000 UTC m=+147.602940140 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.656769 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.656844 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.656864 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.656892 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.656912 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:41Z","lastTransitionTime":"2025-11-24T07:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.704729 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.704815 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.704859 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.704915 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.704943 4691 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.704946 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 
07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.704971 4691 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.705016 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 07:59:45.704996664 +0000 UTC m=+147.703945933 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.705037 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 07:59:45.705027095 +0000 UTC m=+147.703976354 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.705121 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.705198 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.705220 4691 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.705323 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 07:59:45.705298353 +0000 UTC m=+147.704247632 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.759991 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:41 crc kubenswrapper[4691]: E1124 07:58:41.760121 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.760429 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.760501 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.760516 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.760536 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.760548 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:41Z","lastTransitionTime":"2025-11-24T07:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.863432 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.863514 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.863533 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.863558 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.863573 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:41Z","lastTransitionTime":"2025-11-24T07:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.965847 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.965899 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.965915 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.965936 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:41 crc kubenswrapper[4691]: I1124 07:58:41.965950 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:41Z","lastTransitionTime":"2025-11-24T07:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.069132 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.069173 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.069185 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.069201 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.069213 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:42Z","lastTransitionTime":"2025-11-24T07:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.173356 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.173828 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.173980 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.174118 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.174256 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:42Z","lastTransitionTime":"2025-11-24T07:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.277277 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.277319 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.277330 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.277348 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.277360 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:42Z","lastTransitionTime":"2025-11-24T07:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.379844 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.380112 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.380174 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.380242 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.380304 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:42Z","lastTransitionTime":"2025-11-24T07:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.481956 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.482001 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.482013 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.482032 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.482046 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:42Z","lastTransitionTime":"2025-11-24T07:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.584610 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.584640 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.584648 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.584661 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.584669 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:42Z","lastTransitionTime":"2025-11-24T07:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.686720 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.686755 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.686766 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.686781 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.686790 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:42Z","lastTransitionTime":"2025-11-24T07:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.760166 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.760245 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.760549 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.760871 4691 scope.go:117] "RemoveContainer" containerID="cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e" Nov 24 07:58:42 crc kubenswrapper[4691]: E1124 07:58:42.760874 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:42 crc kubenswrapper[4691]: E1124 07:58:42.761048 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:42 crc kubenswrapper[4691]: E1124 07:58:42.761192 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.792799 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.792852 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.792873 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.792904 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.792926 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:42Z","lastTransitionTime":"2025-11-24T07:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.895235 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.895268 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.895277 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.895295 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.895304 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:42Z","lastTransitionTime":"2025-11-24T07:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.997956 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.997996 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.998011 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.998034 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:42 crc kubenswrapper[4691]: I1124 07:58:42.998048 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:42Z","lastTransitionTime":"2025-11-24T07:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.101060 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.101104 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.101113 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.101128 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.101138 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:43Z","lastTransitionTime":"2025-11-24T07:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.204002 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.204041 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.204053 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.204071 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.204084 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:43Z","lastTransitionTime":"2025-11-24T07:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.311716 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.311765 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.311774 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.311790 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.311799 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:43Z","lastTransitionTime":"2025-11-24T07:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.317620 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/2.log" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.319419 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805"} Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.319939 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.330875 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.341688 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.353317 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 
07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.363868 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.375705 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.386257 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.396421 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:29Z\\\",\\\"message\\\":\\\"2025-11-24T07:57:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f\\\\n2025-11-24T07:57:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f to /host/opt/cni/bin/\\\\n2025-11-24T07:57:44Z [verbose] multus-daemon started\\\\n2025-11-24T07:57:44Z [verbose] Readiness Indicator file check\\\\n2025-11-24T07:58:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:58:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.409314 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.414724 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.414849 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:43 crc 
kubenswrapper[4691]: I1124 07:58:43.414925 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.415025 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.415118 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:43Z","lastTransitionTime":"2025-11-24T07:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.418735 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132a7288-f75c-4408-9e2b-aefc5f524bea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://952741e34a2f95082426fc2d094c5f68671d32a477997392462ca0c54c0686d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2040089a4ee1b2fc3f3f46bf7840d8d71b563a282616da13b2b3cf8e3007f931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2040089a4ee1b2fc3f3f46bf7840d8d71b563a282616da13b2b3cf8e3007f931\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\
\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.430190 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"imag
e\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.440867 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ace2c8b4-5923-49d7-b19a-8778bf4a4f99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.452898 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.464547 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.486659 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount
\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"co
ntainerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.498899 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.510274 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.518139 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.518180 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.518191 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.518207 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.518217 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:43Z","lastTransitionTime":"2025-11-24T07:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.523788 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.546518 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"message\\\":\\\"il\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659249 6383 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Router_Static_Route Row:map[ip_prefix:10.217.0.0/22 nexthop:100.64.0.2 policy:{GoSet:[src-ip]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8944024f-deb7-4076-afb3-4b50a2ff4b4b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:static_routes Mutator:insert Value:{GoSet:[{GoUUID:8944024f-deb7-4076-afb3-4b50a2ff4b4b}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f6d604c1-9711-4e25-be6c-79ec28bbad1b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659514 6383 obj_retry.go:551] Creating *factory.egressNode crc took: 2.050517ms\\\\nI1124 07:58:13.659530 6383 factory.go:1336] Added *v1.Node event handler 7\\\\nI1124 07:58:13.659549 6383 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1124 07:58:13.659747 6383 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 07:58:13.659815 6383 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 07:58:13.659845 6383 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:58:13.659871 6383 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 07:58:13.659939 6383 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:58:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.559038 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:43Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.620228 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.620545 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.620623 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.620694 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.620753 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:43Z","lastTransitionTime":"2025-11-24T07:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.724052 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.724848 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.725027 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.725207 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.725327 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:43Z","lastTransitionTime":"2025-11-24T07:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.760108 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:43 crc kubenswrapper[4691]: E1124 07:58:43.760270 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.827913 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.827956 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.827971 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.827989 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.828002 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:43Z","lastTransitionTime":"2025-11-24T07:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.930872 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.931312 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.931593 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.931750 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:43 crc kubenswrapper[4691]: I1124 07:58:43.931886 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:43Z","lastTransitionTime":"2025-11-24T07:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.035858 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.035934 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.036022 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.036059 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.036086 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:44Z","lastTransitionTime":"2025-11-24T07:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.139940 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.140018 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.140046 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.140078 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.140100 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:44Z","lastTransitionTime":"2025-11-24T07:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.243170 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.243215 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.243230 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.243253 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.243267 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:44Z","lastTransitionTime":"2025-11-24T07:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.346311 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.346362 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.346376 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.346396 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.346409 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:44Z","lastTransitionTime":"2025-11-24T07:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.449269 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.449324 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.449363 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.449383 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.449397 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:44Z","lastTransitionTime":"2025-11-24T07:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.552951 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.553005 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.553019 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.553038 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.553049 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:44Z","lastTransitionTime":"2025-11-24T07:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.655806 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.655864 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.655876 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.655914 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.655927 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:44Z","lastTransitionTime":"2025-11-24T07:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.759415 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.759519 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.759544 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.759579 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.759604 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:44Z","lastTransitionTime":"2025-11-24T07:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.761181 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.761235 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:44 crc kubenswrapper[4691]: E1124 07:58:44.761298 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.761360 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:44 crc kubenswrapper[4691]: E1124 07:58:44.761524 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:44 crc kubenswrapper[4691]: E1124 07:58:44.761638 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.861628 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.861680 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.861697 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.861719 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.861735 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:44Z","lastTransitionTime":"2025-11-24T07:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.965796 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.966279 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.966297 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.966326 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:44 crc kubenswrapper[4691]: I1124 07:58:44.966349 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:44Z","lastTransitionTime":"2025-11-24T07:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.070213 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.070574 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.070683 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.070797 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.070889 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:45Z","lastTransitionTime":"2025-11-24T07:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.173828 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.174116 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.174429 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.174614 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.174728 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:45Z","lastTransitionTime":"2025-11-24T07:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.277829 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.278178 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.278271 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.278362 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.278479 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:45Z","lastTransitionTime":"2025-11-24T07:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.382020 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.382336 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.382459 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.382556 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.382646 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:45Z","lastTransitionTime":"2025-11-24T07:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.486402 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.486790 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.486862 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.486936 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.487016 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:45Z","lastTransitionTime":"2025-11-24T07:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.589874 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.589963 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.589983 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.590014 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.590036 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:45Z","lastTransitionTime":"2025-11-24T07:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.693087 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.693321 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.693331 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.693348 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.693362 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:45Z","lastTransitionTime":"2025-11-24T07:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.759631 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:45 crc kubenswrapper[4691]: E1124 07:58:45.759874 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.796305 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.796376 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.796392 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.796411 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.796426 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:45Z","lastTransitionTime":"2025-11-24T07:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.898661 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.898741 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.898766 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.898800 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:45 crc kubenswrapper[4691]: I1124 07:58:45.898826 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:45Z","lastTransitionTime":"2025-11-24T07:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.002553 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.002601 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.002615 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.002637 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.002652 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.106001 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.106084 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.106103 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.106130 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.106150 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.209948 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.210012 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.210028 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.210051 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.210065 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.229092 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.229135 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.229146 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.229169 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.229182 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: E1124 07:58:46.245820 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.250188 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.250236 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.250251 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.250270 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.250283 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: E1124 07:58:46.262909 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.266082 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.266116 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.266128 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.266146 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.266157 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: E1124 07:58:46.278382 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.281752 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.281781 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.281793 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.281805 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.281814 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: E1124 07:58:46.293627 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.297405 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.297439 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
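Every failed patch above ends with the same root cause: the node-status PATCH is intercepted by the node.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743, and that webhook's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-24. A minimal Go sketch for reading the certificate window directly is below; the endpoint address is taken from the log, and chain verification is skipped on purpose so the handshake completes even though the certificate no longer verifies.

// certwindow.go - sketch: dial the webhook endpoint named in the log and
// print its leaf certificate's validity window. InsecureSkipVerify is
// deliberate here: the goal is to read NotBefore/NotAfter even when the
// chain fails to verify, as it does in the kubelet errors above.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Endpoint taken from the failed Post in the log above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("handshake failed: %v", err)
	}
	defer conn.Close()

	leaf := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:   %s\n", leaf.Subject)
	fmt.Printf("notBefore: %s\n", leaf.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", leaf.NotAfter.Format(time.RFC3339))
	if time.Now().After(leaf.NotAfter) {
		fmt.Println("certificate has expired, matching the x509 error above")
	}
}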
event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.297463 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.297487 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.297497 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: E1124 07:58:46.308580 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:46Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:46 crc kubenswrapper[4691]: E1124 07:58:46.308694 4691 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.311907 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
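The line "update node status exceeds retry count" marks the kubelet giving up for this sync interval after a fixed number of back-to-back attempts; the cycle restarts on the next tick, which is why the same attempt-and-failure pattern recurs through the rest of this log. The sketch below shows that retry shape in simplified form; it is illustrative rather than the kubelet's actual source, and the attempt count of 5 is an assumption (upstream kubelet uses a small fixed constant named nodeStatusUpdateRetry).

// retryshape.go - illustrative only: the give-up-after-N-attempts shape
// implied by "will retry" followed by "exceeds retry count". Neither the
// constant's value nor patchNodeStatus is the kubelet's real code.
package main

import (
	"errors"
	"fmt"
)

const nodeStatusUpdateRetry = 5 // assumed small fixed attempt count

// patchNodeStatus stands in for the PATCH that the admission webhook
// rejects in the log; here it always fails, as it does above.
func patchNodeStatus() error {
	return errors.New(`failed calling webhook "node.network-node-identity.openshift.io": x509: certificate has expired`)
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		err := patchNodeStatus()
		if err == nil {
			return nil
		}
		fmt.Printf("Error updating node status, will retry: %v\n", err)
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}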
event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.311925 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.311932 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.311944 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.311952 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.415010 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.415050 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.415062 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.415078 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.415088 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.517322 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.517356 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.517366 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.517380 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.517388 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.619959 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.620001 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.620009 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.620024 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.620034 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.723426 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.723535 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.723559 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.723588 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.723611 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.760689 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.760851 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.760969 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:46 crc kubenswrapper[4691]: E1124 07:58:46.761193 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:46 crc kubenswrapper[4691]: E1124 07:58:46.761496 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:46 crc kubenswrapper[4691]: E1124 07:58:46.761647 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.827593 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.827680 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.827704 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.827739 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.827796 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.931384 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.931473 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.931501 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.931535 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:46 crc kubenswrapper[4691]: I1124 07:58:46.931555 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:46Z","lastTransitionTime":"2025-11-24T07:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.034353 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.034414 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.034489 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.034523 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.034547 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:47Z","lastTransitionTime":"2025-11-24T07:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.137005 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.137051 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.137061 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.137077 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.137090 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:47Z","lastTransitionTime":"2025-11-24T07:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.239083 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.239146 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.239165 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.239190 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.239208 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:47Z","lastTransitionTime":"2025-11-24T07:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.342001 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.342059 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.342073 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.342091 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.342101 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:47Z","lastTransitionTime":"2025-11-24T07:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.445713 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.445780 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.445801 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.445834 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.445856 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:47Z","lastTransitionTime":"2025-11-24T07:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.548803 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.548864 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.548881 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.548907 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.548930 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:47Z","lastTransitionTime":"2025-11-24T07:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.651658 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.651695 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.651706 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.651723 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.651734 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:47Z","lastTransitionTime":"2025-11-24T07:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.754479 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.754560 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.754570 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.754584 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.754595 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:47Z","lastTransitionTime":"2025-11-24T07:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.759996 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:47 crc kubenswrapper[4691]: E1124 07:58:47.760603 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.858069 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.858179 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.858199 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.858228 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.858249 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:47Z","lastTransitionTime":"2025-11-24T07:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.961648 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.961702 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.961715 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.961734 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:47 crc kubenswrapper[4691]: I1124 07:58:47.961747 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:47Z","lastTransitionTime":"2025-11-24T07:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.065707 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.065776 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.065786 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.065801 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.065811 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:48Z","lastTransitionTime":"2025-11-24T07:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.168522 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.168840 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.168978 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.169169 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.169315 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:48Z","lastTransitionTime":"2025-11-24T07:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.272616 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.272697 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.272711 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.272733 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.272745 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:48Z","lastTransitionTime":"2025-11-24T07:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.375092 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.375129 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.375140 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.375155 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.375166 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:48Z","lastTransitionTime":"2025-11-24T07:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.477376 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.477413 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.477422 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.477436 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.477466 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:48Z","lastTransitionTime":"2025-11-24T07:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.579829 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.579869 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.579878 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.579894 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.579904 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:48Z","lastTransitionTime":"2025-11-24T07:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.683049 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.683126 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.683151 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.683182 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.683205 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:48Z","lastTransitionTime":"2025-11-24T07:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.760375 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.760429 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.760375 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:48 crc kubenswrapper[4691]: E1124 07:58:48.760891 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:48 crc kubenswrapper[4691]: E1124 07:58:48.761129 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:48 crc kubenswrapper[4691]: E1124 07:58:48.761204 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.783989 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"s
tartedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc0244a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56
419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.786132 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.786257 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.786357 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.786467 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.786710 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:48Z","lastTransitionTime":"2025-11-24T07:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.803673 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.818326 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.829581 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.867262 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"message\\\":\\\"il\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659249 6383 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Router_Static_Route Row:map[ip_prefix:10.217.0.0/22 nexthop:100.64.0.2 policy:{GoSet:[src-ip]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8944024f-deb7-4076-afb3-4b50a2ff4b4b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:static_routes Mutator:insert Value:{GoSet:[{GoUUID:8944024f-deb7-4076-afb3-4b50a2ff4b4b}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f6d604c1-9711-4e25-be6c-79ec28bbad1b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659514 6383 obj_retry.go:551] Creating *factory.egressNode crc took: 2.050517ms\\\\nI1124 07:58:13.659530 6383 factory.go:1336] Added *v1.Node event handler 7\\\\nI1124 07:58:13.659549 6383 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1124 07:58:13.659747 6383 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 07:58:13.659815 6383 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 07:58:13.659845 6383 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:58:13.659871 6383 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 07:58:13.659939 6383 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:58:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.883541 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.891180 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.891241 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.891254 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.891275 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.891288 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:48Z","lastTransitionTime":"2025-11-24T07:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.896760 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.911147 4691 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.924563 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 
07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.937066 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.949476 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.960058 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.970224 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:29Z\\\",\\\"message\\\":\\\"2025-11-24T07:57:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f\\\\n2025-11-24T07:57:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f to /host/opt/cni/bin/\\\\n2025-11-24T07:57:44Z [verbose] multus-daemon started\\\\n2025-11-24T07:57:44Z [verbose] Readiness Indicator file check\\\\n2025-11-24T07:58:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:58:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.983566 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.992821 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"132a7288-f75c-4408-9e2b-aefc5f524bea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://952741e34a2f95082426fc2d094c5f68671d32a477997392462ca0c54c0686d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2040089a4ee1b2fc3f3f46bf7840d8d71b563a282616da13b2b3cf8e3007f931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2040089a4ee1b2fc3f3f46bf7840d8d71b563a282616da13b2b3cf8e3007f931\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:48Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.994745 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.994783 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.994794 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.994811 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:48 crc kubenswrapper[4691]: I1124 07:58:48.994823 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:48Z","lastTransitionTime":"2025-11-24T07:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.007893 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for 
client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.022686 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ace2c8b4-5923-49d7-b19a-8778bf4a4f99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.037197 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.050147 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:49Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.097743 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.097796 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.097809 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.097827 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.097840 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:49Z","lastTransitionTime":"2025-11-24T07:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.202588 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.202636 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.202647 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.202662 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.202673 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:49Z","lastTransitionTime":"2025-11-24T07:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.306458 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.306507 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.306517 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.306533 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.306545 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:49Z","lastTransitionTime":"2025-11-24T07:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.409962 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.410017 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.410065 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.410086 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.410098 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:49Z","lastTransitionTime":"2025-11-24T07:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.514091 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.514139 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.514149 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.514167 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.514182 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:49Z","lastTransitionTime":"2025-11-24T07:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.616935 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.617392 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.618054 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.618151 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.618219 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:49Z","lastTransitionTime":"2025-11-24T07:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.722033 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.722101 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.722118 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.722135 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.722149 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:49Z","lastTransitionTime":"2025-11-24T07:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.760226 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:49 crc kubenswrapper[4691]: E1124 07:58:49.760677 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.825403 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.825642 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.825669 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.825701 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.825721 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:49Z","lastTransitionTime":"2025-11-24T07:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.928489 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.928542 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.928556 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.928576 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:49 crc kubenswrapper[4691]: I1124 07:58:49.928589 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:49Z","lastTransitionTime":"2025-11-24T07:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 24 07:58:50 crc kubenswrapper[4691]: I1124 07:58:50.032228 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:50 crc kubenswrapper[4691]: I1124 07:58:50.032316 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:50 crc kubenswrapper[4691]: I1124 07:58:50.032341 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:50 crc kubenswrapper[4691]: I1124 07:58:50.032378 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:50 crc kubenswrapper[4691]: I1124 07:58:50.032397 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:50Z","lastTransitionTime":"2025-11-24T07:58:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the same five-entry block (four "Recording event message for node" events plus setters.go:603 "Node became not ready") repeats at ~100 ms intervals, 07:58:50.135 through 07:58:50.754 ...]
Nov 24 07:58:50 crc kubenswrapper[4691]: I1124 07:58:50.760040 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 07:58:50 crc kubenswrapper[4691]: I1124 07:58:50.760142 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:50 crc kubenswrapper[4691]: E1124 07:58:50.760188 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 07:58:50 crc kubenswrapper[4691]: I1124 07:58:50.760226 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 07:58:50 crc kubenswrapper[4691]: E1124 07:58:50.760364 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db"
Nov 24 07:58:50 crc kubenswrapper[4691]: E1124 07:58:50.760470 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
[... the same five-entry NotReady block repeats at 07:58:50.857 and 07:58:50.961 ...]
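The entries above repeat two signatures: the node-level NotReady condition ("no CNI configuration file in /etc/kubernetes/cni/net.d/") and per-pod "Error syncing pod, skipping" failures. A minimal triage sketch for a capture like this one, in Python; the file name kubelet.log and the script itself are illustrative assumptions, not part of this log. It extracts the time span of the NotReady heartbeats and the distinct pods blocked behind the CNI error:

import re
import sys
from collections import OrderedDict

# Entry signatures copied from the log above; the timestamp group is tied to
# the setters.go "Node became not ready" entry specifically, not the
# surrounding "Recording event message" entries.
NOT_READY = re.compile(
    r'(\d{2}:\d{2}:\d{2}\.\d+) \d+ setters\.go:\d+\] "Node became not ready"'
)
POD_ERROR = re.compile(
    r'"Error syncing pod, skipping" err="[^"]*" pod="([^"]+)" podUID="([^"]+)"'
)

def triage(path):
    first = last = None      # span of the NotReady heartbeats
    pods = OrderedDict()     # pod -> UID, de-duplicated, first-seen order
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            # A wrapped line may hold several entries, so use findall.
            for ts in NOT_READY.findall(line):
                first = first or ts
                last = ts
            for pod, uid in POD_ERROR.findall(line):
                pods.setdefault(pod, uid)
    print(f"NodeNotReady recorded from {first} to {last}")
    for pod, uid in pods.items():
        print(f"  blocked pod: {pod} (uid {uid})")

if __name__ == "__main__":
    triage(sys.argv[1] if len(sys.argv) > 1 else "kubelet.log")

On this capture it would report the span 07:58:50 through 07:58:55 and the four pods seen above: networking-console-plugin-85b44fc459-gdk6g, network-metrics-daemon-98whr, network-check-source-55646444c4-trplf, and network-check-target-xd92c.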
[... five-entry NotReady block repeats at ~100 ms intervals, 07:58:51.064 through 07:58:51.685 ...]
Nov 24 07:58:51 crc kubenswrapper[4691]: I1124 07:58:51.759633 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 07:58:51 crc kubenswrapper[4691]: E1124 07:58:51.759788 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
[... five-entry NotReady block repeats at ~100 ms intervals, 07:58:51.789 through 07:58:52.717 ...]
Nov 24 07:58:52 crc kubenswrapper[4691]: I1124 07:58:52.760224 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:52 crc kubenswrapper[4691]: I1124 07:58:52.760292 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 07:58:52 crc kubenswrapper[4691]: I1124 07:58:52.760554 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 07:58:52 crc kubenswrapper[4691]: E1124 07:58:52.760664 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db"
Nov 24 07:58:52 crc kubenswrapper[4691]: E1124 07:58:52.760786 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 07:58:52 crc kubenswrapper[4691]: E1124 07:58:52.761032 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
[... five-entry NotReady block repeats at ~100 ms intervals, 07:58:52.819 through 07:58:53.756 ...]
Nov 24 07:58:53 crc kubenswrapper[4691]: I1124 07:58:53.760124 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 24 07:58:53 crc kubenswrapper[4691]: E1124 07:58:53.760404 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
[... five-entry NotReady block repeats at 07:58:53.860 and 07:58:53.963 ...]
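Every failure above quotes the same root cause: no CNI configuration file in /etc/kubernetes/cni/net.d/. On this node the network provider is expected to write that file once it starts; until then the runtime keeps reporting NetworkReady=false. A simplified, illustrative approximation of that readiness check, in the same spirit as the triage sketch; the extension list is an assumption based on common CNI config loaders, not something stated in this log:

import sys
from pathlib import Path

# Directory quoted verbatim in the kubelet error above.
CNI_CONF_DIR = Path("/etc/kubernetes/cni/net.d")
# Extensions CNI config loaders generally accept (assumed, simplified).
CNI_EXTS = {".conf", ".conflist", ".json"}

def network_ready(conf_dir: Path = CNI_CONF_DIR) -> bool:
    """Rough stand-in for the NetworkReady check: is any CNI config present?"""
    if not conf_dir.is_dir():
        print(f"{conf_dir}: missing -> NetworkReady=false")
        return False
    configs = sorted(p for p in conf_dir.iterdir() if p.suffix in CNI_EXTS)
    for p in configs:
        print(f"found CNI config: {p.name}")
    if not configs:
        print(f"{conf_dir}: empty -> NetworkReady=false "
              "(matches the 'no CNI configuration file' error)")
    return bool(configs)

if __name__ == "__main__":
    sys.exit(0 if network_ready() else 1)

The exit status mirrors the condition: non-zero while the directory is empty, zero once the network provider drops a config (for example a .conflist) into it.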
[... five-entry NotReady block repeats at ~100 ms intervals, 07:58:54.067 through 07:58:54.672 ...]
Nov 24 07:58:54 crc kubenswrapper[4691]: I1124 07:58:54.760187 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 24 07:58:54 crc kubenswrapper[4691]: I1124 07:58:54.760363 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr"
Nov 24 07:58:54 crc kubenswrapper[4691]: E1124 07:58:54.760777 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 24 07:58:54 crc kubenswrapper[4691]: E1124 07:58:54.761051 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db"
Nov 24 07:58:54 crc kubenswrapper[4691]: I1124 07:58:54.761543 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 24 07:58:54 crc kubenswrapper[4691]: E1124 07:58:54.761623 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
[... five-entry NotReady block repeats at ~100 ms intervals, 07:58:54.775 through 07:58:55.605 ...]
Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.709364 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.709547 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.709568 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.709614 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.709648 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:55Z","lastTransitionTime":"2025-11-24T07:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.759635 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:55 crc kubenswrapper[4691]: E1124 07:58:55.759807 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.813004 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.813069 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.813091 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.813120 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.813142 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:55Z","lastTransitionTime":"2025-11-24T07:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.916961 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.917009 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.917022 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.917041 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:55 crc kubenswrapper[4691]: I1124 07:58:55.917058 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:55Z","lastTransitionTime":"2025-11-24T07:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.020378 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.020425 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.020437 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.020476 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.020489 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.129078 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.129415 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.129426 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.129761 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.129777 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.233049 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.233085 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.233093 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.233111 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.233122 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.336878 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.336951 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.336962 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.336979 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.336988 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.440164 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.440202 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.440211 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.440225 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.440234 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.543130 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.543167 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.543175 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.543189 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.543197 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.566299 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.566335 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.566344 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.566362 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.566370 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: E1124 07:58:56.583547 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.588170 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.588208 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.588221 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.588241 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.588255 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: E1124 07:58:56.600587 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.605142 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.605165 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.605173 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.605188 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.605197 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: E1124 07:58:56.617956 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.623038 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.623095 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.623113 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.623138 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.623157 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: E1124 07:58:56.637635 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.642341 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.642421 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.642492 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.642517 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.642557 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: E1124 07:58:56.654813 4691 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8d11f6fa-6bf9-4115-b9e3-5cfc4b48395c\\\",\\\"systemUUID\\\":\\\"5253ba1a-9775-49a3-ac2c-46321419cc02\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:56Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:56 crc kubenswrapper[4691]: E1124 07:58:56.655031 4691 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.657318 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.657381 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.657397 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.657419 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.657434 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.759600 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.759638 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.759727 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:56 crc kubenswrapper[4691]: E1124 07:58:56.759845 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:56 crc kubenswrapper[4691]: E1124 07:58:56.760095 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:56 crc kubenswrapper[4691]: E1124 07:58:56.760213 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.761644 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.761688 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.761702 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.761721 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.761735 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.864776 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.864826 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.864838 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.864858 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.864873 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.967726 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.967791 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.967808 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.967835 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:56 crc kubenswrapper[4691]: I1124 07:58:56.967855 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:56Z","lastTransitionTime":"2025-11-24T07:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.070947 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.070990 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.071002 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.071019 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.071032 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:57Z","lastTransitionTime":"2025-11-24T07:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.174129 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.174189 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.174199 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.174218 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.174230 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:57Z","lastTransitionTime":"2025-11-24T07:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.277922 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.277976 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.277991 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.278014 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.278028 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:57Z","lastTransitionTime":"2025-11-24T07:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.380168 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.380201 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.380210 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.380223 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.380231 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:57Z","lastTransitionTime":"2025-11-24T07:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.483131 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.483206 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.483227 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.483257 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.483276 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:57Z","lastTransitionTime":"2025-11-24T07:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.592478 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.592528 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.592545 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.592568 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.592587 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:57Z","lastTransitionTime":"2025-11-24T07:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.695112 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.695160 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.695171 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.695188 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.695200 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:57Z","lastTransitionTime":"2025-11-24T07:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.759904 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:57 crc kubenswrapper[4691]: E1124 07:58:57.760100 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.797931 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.798029 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.798084 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.798113 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.798133 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:57Z","lastTransitionTime":"2025-11-24T07:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.900840 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.900913 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.900931 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.900957 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:57 crc kubenswrapper[4691]: I1124 07:58:57.900975 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:57Z","lastTransitionTime":"2025-11-24T07:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.005887 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.005983 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.006020 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.006040 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.006058 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:58Z","lastTransitionTime":"2025-11-24T07:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.108524 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.108614 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.108647 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.108680 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.108699 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:58Z","lastTransitionTime":"2025-11-24T07:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.212108 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.212170 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.212180 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.212200 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.212211 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:58Z","lastTransitionTime":"2025-11-24T07:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.315109 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.315193 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.315217 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.315252 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.315275 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:58Z","lastTransitionTime":"2025-11-24T07:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.418937 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.418991 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.419003 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.419021 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.419033 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:58Z","lastTransitionTime":"2025-11-24T07:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.522763 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.522876 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.522895 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.522918 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.522935 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:58Z","lastTransitionTime":"2025-11-24T07:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.625177 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.625220 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.625230 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.625247 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.625257 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:58Z","lastTransitionTime":"2025-11-24T07:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.728993 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.729048 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.729058 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.729077 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.729089 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:58Z","lastTransitionTime":"2025-11-24T07:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.760918 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.761109 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:58:58 crc kubenswrapper[4691]: E1124 07:58:58.761177 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.761212 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:58:58 crc kubenswrapper[4691]: E1124 07:58:58.761323 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:58:58 crc kubenswrapper[4691]: E1124 07:58:58.761532 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.784902 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab18708aa4b58b1e4e97eec1b259d8f4b26bcb650049ff1b2e8570a116eff9be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d77e733f5213d983c7ff7728017191fe1c2e1d7c1a0df28f20dba2649e50728f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/we
bhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.801925 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.818395 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"285fed6b-5793-4f67-8f5a-8bb6bebccfa2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5be30d4ad1d84663c3eb3b0bbca07fc116e71aef333d1a3ef6f669b87d96260\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c51bf95dda4f798cd9f1594fcdbeb3cbc4e53831358b6b16227bd2243f301f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zkxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4bjjp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.832071 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.832163 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.832216 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.832276 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.832294 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:58Z","lastTransitionTime":"2025-11-24T07:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.832315 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-98whr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21147e4f-4335-4c12-9a81-aa333d8301db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vvd6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:56Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-98whr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.851696 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"726f0365-95cd-4ed9-b204-2e35d8637477\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://880176c6ee93b27f15a99d06b6ab3c11e199a062994a75a27bd892ee02b59932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fad7ebe4a011bc824aa932a265d85415344d72d60cbb653e5f014d4deaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1457488bfbada35a03f87e61cba168464a332580f3ece6d2c0b15faea687337\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f6ce485c417cb054cf295686e5aae0149624d62e42b71bb6d682e0602134da6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.871909 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.887852 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gxxrf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2332a73-f85c-470c-9209-c5e5cd1bc3a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:29Z\\\",\\\"message\\\":\\\"2025-11-24T07:57:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f\\\\n2025-11-24T07:57:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1c283d2c-1850-42b7-b9f2-0cf95268058f to /host/opt/cni/bin/\\\\n2025-11-24T07:57:44Z [verbose] multus-daemon started\\\\n2025-11-24T07:57:44Z [verbose] Readiness Indicator file check\\\\n2025-11-24T07:58:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:58:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jhm5j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gxxrf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.904383 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"27140986-dd30-4f6b-beac-d173dca9a94c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://730c8a45dbc29d5f37334a467817701d86334096f481ad6e97debbffad15b8ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85a5a2f31fedfbbe3ad01b48655483c0dd302d276a7ac55fb67563df6e0ab625\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4255c14ee01f3b53e0810518f8378de8d8fa721e80ae8afd01d473b9ca64e69\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ea7941a7924fc423bc4b4fadc464db3666385389b9d07e93b9debaff460266b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c9a5f9f1ec4fff64e3a30af4ef42bde48383128d7bbb305e7994f485136b3301\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e5a85b5510d8f17e710cb27dbc90335ec2425f22885fff95c67818c3bedbbb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://981de5690c275e0ef4768b69dcaa6df9396e10c5c59e39323b8c8d4f03c7bba5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zpl5k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-zw5l9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.916873 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"54ccc455-9127-4afd-b3a4-7fc35181bf93\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://df23d83b2ec1d3b226650c37861a93ff0671dc0af3639d97f5db0bfd06885577\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fxkfc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-fcwmc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.951428 4691 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.951498 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.951519 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.951543 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.951564 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:58Z","lastTransitionTime":"2025-11-24T07:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.960697 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132a7288-f75c-4408-9e2b-aefc5f524bea\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://952741e34a2f95082426fc2d094c5f68671d32a477997392462ca0c54c0686d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2040089a4ee1b2fc3f3f46bf7840d8d71b563a282616da13b2b3cf8e3007f931\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCo
unt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2040089a4ee1b2fc3f3f46bf7840d8d71b563a282616da13b2b3cf8e3007f931\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:58 crc kubenswrapper[4691]: I1124 07:58:58.990003 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc2a4056-2f5f-481a-a825-ada120869196\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac7d4313c9b26f798c6e8b45eea7a3e739b2a774e7d82f94afc669e2af3eb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://172c159ffd9845ee0832caaff33ce8dd2db8c36306c6ee825a188abebb141d8a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7dc6b0535b6047e65f318017c0de60dc42560a7349060e1c6b1e01d4f4184c6e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5fdc6f11369c19d3e2285ab3c27172f49f7d71014767776c24ff83d217b8484\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f1da4ab77a4a128f5631fe0caa206ddae2b5109745dc4df66af54126469c2b5\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-24T07:57:37Z\\\",\\\"message\\\":\\\"1124 07:57:22.246685 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-409969175/tls.crt::/tmp/serving-cert-409969175/tls.key\\\\\\\"\\\\nI1124 07:57:37.699529 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1124 07:57:37.705235 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1124 07:57:37.705266 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1124 07:57:37.705298 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1124 07:57:37.705308 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1124 07:57:37.715119 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1124 07:57:37.715141 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715191 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1124 07:57:37.715195 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1124 07:57:37.715198 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1124 07:57:37.715201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1124 07:57:37.715204 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1124 07:57:37.715465 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1124 07:57:37.718788 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1124 
07:57:37.718853 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1124 07:57:37.719237 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc263cb39cdb77417de4894ad59ee58bba22e97448b0441a367eefed28ed6839\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6b89eeeeaf41be3778dd5572128382f3902906da6c5063ee5fe3fe6cc1909102\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:58Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.008133 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ace2c8b4-5923-49d7-b19a-8778bf4a4f99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7ef3b97a1e9ea8617dddcbbb0b43f3cbe2b27fc803429fc5d965b766852bb5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://142e19b1c2a5f2e3a2b0dfcf9220cf617333ffe8166d8a1abe10ac11c277f41d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c384d9f46f4574e44a82ec9df2a2700f39fecfaaffdd15506059c4085d89682c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://58112d8ac5f25f3d4fca2a520c16125a2511e6a1d64db73aeea92d4a6aaf94d8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.021965 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.045713 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"106a6e78-a004-4232-a0a2-efecf2f7c248\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62f9fb0e1ed54796357e343a933374f7aa6b3f84
25673dab48cdd0da573ca805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-24T07:58:13Z\\\",\\\"message\\\":\\\"il\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659249 6383 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Router_Static_Route Row:map[ip_prefix:10.217.0.0/22 nexthop:100.64.0.2 policy:{GoSet:[src-ip]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8944024f-deb7-4076-afb3-4b50a2ff4b4b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:static_routes Mutator:insert Value:{GoSet:[{GoUUID:8944024f-deb7-4076-afb3-4b50a2ff4b4b}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f6d604c1-9711-4e25-be6c-79ec28bbad1b}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1124 07:58:13.659514 6383 obj_retry.go:551] Creating *factory.egressNode crc took: 2.050517ms\\\\nI1124 07:58:13.659530 6383 factory.go:1336] Added *v1.Node event handler 7\\\\nI1124 07:58:13.659549 6383 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1124 07:58:13.659747 6383 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1124 07:58:13.659815 6383 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1124 07:58:13.659845 6383 ovnkube.go:599] Stopped ovnkube\\\\nI1124 07:58:13.659871 6383 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1124 07:58:13.659939 6383 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-24T07:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:58:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-djg58\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6f24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.054542 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.054573 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.054581 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.054615 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.054626 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:59Z","lastTransitionTime":"2025-11-24T07:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.059159 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7pdtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c031c6f0-57e9-4339-ac63-323d1effb276\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39a35927a133bd73526e8f1e2a15274bcc887d91b27b0860cd3260189cae5c0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8qn6m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:44Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7pdtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.082523 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4bd85c10-d0d2-429f-a436-922fb7085749\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11a8f3ebef95156c43c3ed468ad7e38ddb292342e1c287681eb4c3d35919c4ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fb4e47bd82466ce3db7fdc289fb9643a7b15b4e00148d0c35031d5d8eb871dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5aff4b4f3bc61adf8a80ee89673e84c5999d1d33f62b419aceb87379dac44ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e9279b4def0865395782c8e2ee71e0a63cfc02
44a20dab6b652a46a9946840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c27685acd91f72fe7ace682dadcebdb985d242c445e9c943642e4b20b8f79fa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dd500dfa4e67f2e2c5859ccba29829b30efe0de2dec004ac39ee3d52b049ac54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac034f88462c0d56419e346da51071f7c707c34594d2fe8f75ea5982eb049e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba93bee6d38f07c8b52ca30ac75a5ad2b9a18104153bf43c3134fba5f6de05cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-24T07:57:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-24T07:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:18Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.098112 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76fd2423d26e46b1416724652d395dc809dbaec389e9e77c536f7bc625f6c55a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.110808 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:40Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0395440e0c6d75ac902601d7b76bdd89d3dd8c627272d3049812a70c83cc20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.123199 4691 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-frdx5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b886b151-658b-493c-b186-658ca0533f06\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-24T07:57:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cb08ff1705d7211718aa61b10c7d20a9e29fe363ec846eec34d12f535dbcbb2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-24T07:57:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9cf4x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-24T07:57:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-frdx5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-24T07:58:59Z is after 2025-08-24T17:21:41Z" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.157904 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.157970 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.157985 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.158006 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.158019 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:59Z","lastTransitionTime":"2025-11-24T07:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.260508 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.260546 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.260557 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.260574 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.260585 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:59Z","lastTransitionTime":"2025-11-24T07:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.363267 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.363311 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.363328 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.363351 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.363367 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:59Z","lastTransitionTime":"2025-11-24T07:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.466203 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.466252 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.466264 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.466281 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.466293 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:59Z","lastTransitionTime":"2025-11-24T07:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.570252 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.570315 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.570327 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.570385 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.570411 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:59Z","lastTransitionTime":"2025-11-24T07:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.673194 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.673254 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.673265 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.673288 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.673301 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:59Z","lastTransitionTime":"2025-11-24T07:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.760100 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:58:59 crc kubenswrapper[4691]: E1124 07:58:59.760336 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.775812 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.775906 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.775944 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.775978 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.776001 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:59Z","lastTransitionTime":"2025-11-24T07:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.879649 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.879728 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.879752 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.879786 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.879811 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:59Z","lastTransitionTime":"2025-11-24T07:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.983020 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.983072 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.983088 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.983117 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:58:59 crc kubenswrapper[4691]: I1124 07:58:59.983139 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:58:59Z","lastTransitionTime":"2025-11-24T07:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.086970 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.087011 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.087020 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.087035 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.087044 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:00Z","lastTransitionTime":"2025-11-24T07:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.116547 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" probeResult="failure" output="" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.189971 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.190019 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.190028 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.190045 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.190054 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:00Z","lastTransitionTime":"2025-11-24T07:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.293302 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.293372 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.293384 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.293401 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.293411 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:00Z","lastTransitionTime":"2025-11-24T07:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.396320 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.396389 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.396401 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.396421 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.396437 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:00Z","lastTransitionTime":"2025-11-24T07:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.499156 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.499199 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.499211 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.499229 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.499241 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:00Z","lastTransitionTime":"2025-11-24T07:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.603058 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.603112 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.603148 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.603166 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.603177 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:00Z","lastTransitionTime":"2025-11-24T07:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.706878 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.706933 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.706941 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.706959 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.706969 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:00Z","lastTransitionTime":"2025-11-24T07:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.760589 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.760654 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.760544 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:00 crc kubenswrapper[4691]: E1124 07:59:00.760776 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:00 crc kubenswrapper[4691]: E1124 07:59:00.760948 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:00 crc kubenswrapper[4691]: E1124 07:59:00.760993 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.814421 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.814486 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.814498 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.814518 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.814531 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:00Z","lastTransitionTime":"2025-11-24T07:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.917994 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.918050 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.918060 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.918077 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.918087 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:00Z","lastTransitionTime":"2025-11-24T07:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:00 crc kubenswrapper[4691]: I1124 07:59:00.924686 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:00 crc kubenswrapper[4691]: E1124 07:59:00.924884 4691 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 07:59:00 crc kubenswrapper[4691]: E1124 07:59:00.924954 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs podName:21147e4f-4335-4c12-9a81-aa333d8301db nodeName:}" failed. No retries permitted until 2025-11-24 08:00:04.924933442 +0000 UTC m=+166.923882691 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs") pod "network-metrics-daemon-98whr" (UID: "21147e4f-4335-4c12-9a81-aa333d8301db") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.021177 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.021255 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.021275 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.021305 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.021326 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:01Z","lastTransitionTime":"2025-11-24T07:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.124354 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.124421 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.124440 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.124494 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.124515 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:01Z","lastTransitionTime":"2025-11-24T07:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.228622 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.228695 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.228720 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.228756 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.228781 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:01Z","lastTransitionTime":"2025-11-24T07:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.331649 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.331728 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.331742 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.331760 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.331773 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:01Z","lastTransitionTime":"2025-11-24T07:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.433913 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.433967 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.433982 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.434000 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.434014 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:01Z","lastTransitionTime":"2025-11-24T07:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.537581 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.537668 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.537684 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.537712 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.537733 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:01Z","lastTransitionTime":"2025-11-24T07:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.641206 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.641267 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.641278 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.641307 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.641323 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:01Z","lastTransitionTime":"2025-11-24T07:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.744990 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.745065 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.745093 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.745124 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.745148 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:01Z","lastTransitionTime":"2025-11-24T07:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.759938 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:01 crc kubenswrapper[4691]: E1124 07:59:01.760158 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.848857 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.848931 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.848957 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.848988 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.849012 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:01Z","lastTransitionTime":"2025-11-24T07:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.952676 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.952896 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.952963 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.953004 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:01 crc kubenswrapper[4691]: I1124 07:59:01.953029 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:01Z","lastTransitionTime":"2025-11-24T07:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.055746 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.055807 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.055820 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.055843 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.055858 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:02Z","lastTransitionTime":"2025-11-24T07:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.158334 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.158418 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.158430 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.158464 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.158477 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:02Z","lastTransitionTime":"2025-11-24T07:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.261284 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.261864 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.262073 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.262261 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.262434 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:02Z","lastTransitionTime":"2025-11-24T07:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.365987 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.367310 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.367583 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.367817 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.368016 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:02Z","lastTransitionTime":"2025-11-24T07:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.471110 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.471144 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.471152 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.471168 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.471179 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:02Z","lastTransitionTime":"2025-11-24T07:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.574176 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.574227 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.574251 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.574280 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.574302 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:02Z","lastTransitionTime":"2025-11-24T07:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.677354 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.677401 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.677412 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.677432 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.677444 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:02Z","lastTransitionTime":"2025-11-24T07:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.760050 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.760062 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:02 crc kubenswrapper[4691]: E1124 07:59:02.760218 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:02 crc kubenswrapper[4691]: E1124 07:59:02.760384 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.760656 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:02 crc kubenswrapper[4691]: E1124 07:59:02.760825 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.779911 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.779953 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.779964 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.779982 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.779994 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:02Z","lastTransitionTime":"2025-11-24T07:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.883020 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.883078 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.883094 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.883114 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.883126 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:02Z","lastTransitionTime":"2025-11-24T07:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.986042 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.986090 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.986100 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.986117 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:02 crc kubenswrapper[4691]: I1124 07:59:02.986128 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:02Z","lastTransitionTime":"2025-11-24T07:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.089411 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.089482 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.089501 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.089525 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.089538 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:03Z","lastTransitionTime":"2025-11-24T07:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.192352 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.192417 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.192433 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.192470 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.192487 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:03Z","lastTransitionTime":"2025-11-24T07:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.295211 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.295332 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.295351 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.295372 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.295382 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:03Z","lastTransitionTime":"2025-11-24T07:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.399097 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.400210 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.400392 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.400555 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.400751 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:03Z","lastTransitionTime":"2025-11-24T07:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.503933 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.503980 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.503996 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.504016 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.504032 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:03Z","lastTransitionTime":"2025-11-24T07:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.606844 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.606893 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.606907 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.606928 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.606943 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:03Z","lastTransitionTime":"2025-11-24T07:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.709115 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.709149 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.709158 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.709171 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.709179 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:03Z","lastTransitionTime":"2025-11-24T07:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.760092 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:03 crc kubenswrapper[4691]: E1124 07:59:03.760248 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.811983 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.812043 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.812054 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.812069 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.812078 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:03Z","lastTransitionTime":"2025-11-24T07:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.914076 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.914136 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.914152 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.914171 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:03 crc kubenswrapper[4691]: I1124 07:59:03.914183 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:03Z","lastTransitionTime":"2025-11-24T07:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.017493 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.017568 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.017588 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.017617 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.017636 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:04Z","lastTransitionTime":"2025-11-24T07:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.120401 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.120484 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.120497 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.120522 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.120534 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:04Z","lastTransitionTime":"2025-11-24T07:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.223142 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.223192 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.223205 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.223223 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.223237 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:04Z","lastTransitionTime":"2025-11-24T07:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.326832 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.326886 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.326901 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.326924 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.326937 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:04Z","lastTransitionTime":"2025-11-24T07:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.390973 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/3.log" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.391765 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/2.log" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.394440 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805" exitCode=1 Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.394468 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805"} Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.394541 4691 scope.go:117] "RemoveContainer" containerID="cff243a6a8f5708245922e399a2c8d8f58545c5aeaa7ff1c56c967b75c21733e" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.395306 4691 scope.go:117] "RemoveContainer" containerID="62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805" Nov 24 07:59:04 crc kubenswrapper[4691]: E1124 07:59:04.395834 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.432433 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.432495 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.432504 4691 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.432521 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.432531 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:04Z","lastTransitionTime":"2025-11-24T07:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.434946 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=86.434926666 podStartE2EDuration="1m26.434926666s" podCreationTimestamp="2025-11-24 07:57:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:04.43368441 +0000 UTC m=+106.432633659" watchObservedRunningTime="2025-11-24 07:59:04.434926666 +0000 UTC m=+106.433875935" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.479189 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-gxxrf" podStartSLOduration=83.479159653 podStartE2EDuration="1m23.479159653s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:04.477568856 +0000 UTC m=+106.476518115" watchObservedRunningTime="2025-11-24 07:59:04.479159653 +0000 UTC m=+106.478108912" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.515603 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-zw5l9" podStartSLOduration=83.515582371 podStartE2EDuration="1m23.515582371s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:04.501811537 +0000 UTC m=+106.500760806" watchObservedRunningTime="2025-11-24 07:59:04.515582371 +0000 UTC m=+106.514531620" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.528690 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=27.528663375 podStartE2EDuration="27.528663375s" podCreationTimestamp="2025-11-24 07:58:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:04.52815094 +0000 UTC m=+106.527100189" watchObservedRunningTime="2025-11-24 07:59:04.528663375 +0000 UTC m=+106.527612634" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.528838 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podStartSLOduration=83.52883235 podStartE2EDuration="1m23.52883235s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:04.51621546 +0000 UTC m=+106.515164709" 
watchObservedRunningTime="2025-11-24 07:59:04.52883235 +0000 UTC m=+106.527781609" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.536278 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.536333 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.536345 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.536364 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.536377 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:04Z","lastTransitionTime":"2025-11-24T07:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.565802 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=87.565763963 podStartE2EDuration="1m27.565763963s" podCreationTimestamp="2025-11-24 07:57:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:04.550969739 +0000 UTC m=+106.549918988" watchObservedRunningTime="2025-11-24 07:59:04.565763963 +0000 UTC m=+106.564713232" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.566680 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=51.56667264 podStartE2EDuration="51.56667264s" podCreationTimestamp="2025-11-24 07:58:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:04.565972529 +0000 UTC m=+106.564921788" watchObservedRunningTime="2025-11-24 07:59:04.56667264 +0000 UTC m=+106.565621909" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.630078 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-7pdtc" podStartSLOduration=83.630049279 podStartE2EDuration="1m23.630049279s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:04.629861743 +0000 UTC m=+106.628811012" watchObservedRunningTime="2025-11-24 07:59:04.630049279 +0000 UTC m=+106.628998528" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.639196 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.639253 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.639266 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:04 crc 
kubenswrapper[4691]: I1124 07:59:04.639284 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.639300 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:04Z","lastTransitionTime":"2025-11-24T07:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.665633 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=82.665600901 podStartE2EDuration="1m22.665600901s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:04.664029195 +0000 UTC m=+106.662978444" watchObservedRunningTime="2025-11-24 07:59:04.665600901 +0000 UTC m=+106.664550170" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.710418 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-frdx5" podStartSLOduration=83.710391155 podStartE2EDuration="1m23.710391155s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:04.709339234 +0000 UTC m=+106.708288513" watchObservedRunningTime="2025-11-24 07:59:04.710391155 +0000 UTC m=+106.709340424" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.741515 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.741565 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.741582 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.741606 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.741622 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:04Z","lastTransitionTime":"2025-11-24T07:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.760563 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.760617 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.760630 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:04 crc kubenswrapper[4691]: E1124 07:59:04.760712 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:04 crc kubenswrapper[4691]: E1124 07:59:04.760804 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:04 crc kubenswrapper[4691]: E1124 07:59:04.760852 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.766379 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4bjjp" podStartSLOduration=82.766360957 podStartE2EDuration="1m22.766360957s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:04.749677297 +0000 UTC m=+106.748626536" watchObservedRunningTime="2025-11-24 07:59:04.766360957 +0000 UTC m=+106.765310206" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.844572 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.844620 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.844638 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.844662 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.844680 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:04Z","lastTransitionTime":"2025-11-24T07:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.947304 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.947351 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.947364 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.947385 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:04 crc kubenswrapper[4691]: I1124 07:59:04.947397 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:04Z","lastTransitionTime":"2025-11-24T07:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.050561 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.050598 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.050606 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.050620 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.050629 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:05Z","lastTransitionTime":"2025-11-24T07:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.153766 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.153825 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.153839 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.153859 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.153871 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:05Z","lastTransitionTime":"2025-11-24T07:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.257088 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.257144 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.257157 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.257176 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.257188 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:05Z","lastTransitionTime":"2025-11-24T07:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.360737 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.360803 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.360825 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.360855 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.360878 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:05Z","lastTransitionTime":"2025-11-24T07:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.401246 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/3.log" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.464070 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.464147 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.464172 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.464206 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.464229 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:05Z","lastTransitionTime":"2025-11-24T07:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.567189 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.567265 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.567290 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.567323 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.567353 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:05Z","lastTransitionTime":"2025-11-24T07:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.671135 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.671279 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.671304 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.671332 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.671350 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:05Z","lastTransitionTime":"2025-11-24T07:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.760400 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:05 crc kubenswrapper[4691]: E1124 07:59:05.760673 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.775107 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.775163 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.775186 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.775217 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.775240 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:05Z","lastTransitionTime":"2025-11-24T07:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.879387 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.879497 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.879523 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.879557 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.879580 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:05Z","lastTransitionTime":"2025-11-24T07:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.982754 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.982842 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.982859 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.982887 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:05 crc kubenswrapper[4691]: I1124 07:59:05.982907 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:05Z","lastTransitionTime":"2025-11-24T07:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.086630 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.086685 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.086697 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.086720 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.086734 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:06Z","lastTransitionTime":"2025-11-24T07:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.189992 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.190055 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.190068 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.190091 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.190106 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:06Z","lastTransitionTime":"2025-11-24T07:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.293520 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.293585 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.293608 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.293639 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.293663 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:06Z","lastTransitionTime":"2025-11-24T07:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.397277 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.397350 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.397368 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.397395 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.397416 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:06Z","lastTransitionTime":"2025-11-24T07:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.500793 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.500879 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.500903 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.500936 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.500959 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:06Z","lastTransitionTime":"2025-11-24T07:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.605215 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.605290 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.605304 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.605329 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.605344 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:06Z","lastTransitionTime":"2025-11-24T07:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.709248 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.709298 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.709311 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.709328 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.709344 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:06Z","lastTransitionTime":"2025-11-24T07:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.760122 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.760234 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:06 crc kubenswrapper[4691]: E1124 07:59:06.760318 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:06 crc kubenswrapper[4691]: E1124 07:59:06.760521 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.760235 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:06 crc kubenswrapper[4691]: E1124 07:59:06.760668 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.813022 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.813113 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.813137 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.813169 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.813192 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:06Z","lastTransitionTime":"2025-11-24T07:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.864272 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.864324 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.864347 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.864369 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.864381 4691 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-24T07:59:06Z","lastTransitionTime":"2025-11-24T07:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.937397 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp"] Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.938290 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.940858 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.941106 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.941814 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 24 07:59:06 crc kubenswrapper[4691]: I1124 07:59:06.942336 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.105577 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9d346f64-bfd3-48be-ae62-7599089b4cfa-service-ca\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.105632 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d346f64-bfd3-48be-ae62-7599089b4cfa-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.105708 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9d346f64-bfd3-48be-ae62-7599089b4cfa-etc-cvo-updatepayloads\") pod 
\"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.105735 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9d346f64-bfd3-48be-ae62-7599089b4cfa-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.105762 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9d346f64-bfd3-48be-ae62-7599089b4cfa-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.206720 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9d346f64-bfd3-48be-ae62-7599089b4cfa-service-ca\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.206781 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d346f64-bfd3-48be-ae62-7599089b4cfa-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.206854 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9d346f64-bfd3-48be-ae62-7599089b4cfa-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.206878 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9d346f64-bfd3-48be-ae62-7599089b4cfa-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.206902 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9d346f64-bfd3-48be-ae62-7599089b4cfa-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.207073 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9d346f64-bfd3-48be-ae62-7599089b4cfa-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: 
\"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.207123 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9d346f64-bfd3-48be-ae62-7599089b4cfa-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.209122 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9d346f64-bfd3-48be-ae62-7599089b4cfa-service-ca\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.215993 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d346f64-bfd3-48be-ae62-7599089b4cfa-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.223639 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9d346f64-bfd3-48be-ae62-7599089b4cfa-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-fhwkp\" (UID: \"9d346f64-bfd3-48be-ae62-7599089b4cfa\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.261693 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" Nov 24 07:59:07 crc kubenswrapper[4691]: W1124 07:59:07.277672 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d346f64_bfd3_48be_ae62_7599089b4cfa.slice/crio-4d5be0dc5380928a78703c07d65f5e1630eb110da4a1fb0b1e15fe245c92c6cd WatchSource:0}: Error finding container 4d5be0dc5380928a78703c07d65f5e1630eb110da4a1fb0b1e15fe245c92c6cd: Status 404 returned error can't find the container with id 4d5be0dc5380928a78703c07d65f5e1630eb110da4a1fb0b1e15fe245c92c6cd Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.414551 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" event={"ID":"9d346f64-bfd3-48be-ae62-7599089b4cfa","Type":"ContainerStarted","Data":"e91eb6abcdd1a73b348a2a36bad11f0d077c72162266630b7244317fb9bd8080"} Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.414633 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" event={"ID":"9d346f64-bfd3-48be-ae62-7599089b4cfa","Type":"ContainerStarted","Data":"4d5be0dc5380928a78703c07d65f5e1630eb110da4a1fb0b1e15fe245c92c6cd"} Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.429171 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-fhwkp" podStartSLOduration=86.429142501 podStartE2EDuration="1m26.429142501s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:07.428800991 +0000 UTC m=+109.427750250" watchObservedRunningTime="2025-11-24 07:59:07.429142501 +0000 UTC m=+109.428091740" Nov 24 07:59:07 crc kubenswrapper[4691]: I1124 07:59:07.759625 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:07 crc kubenswrapper[4691]: E1124 07:59:07.759822 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:08 crc kubenswrapper[4691]: I1124 07:59:08.759644 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:08 crc kubenswrapper[4691]: I1124 07:59:08.759697 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:08 crc kubenswrapper[4691]: E1124 07:59:08.760833 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:08 crc kubenswrapper[4691]: I1124 07:59:08.759749 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:08 crc kubenswrapper[4691]: E1124 07:59:08.761112 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:08 crc kubenswrapper[4691]: E1124 07:59:08.761181 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:09 crc kubenswrapper[4691]: I1124 07:59:09.760347 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:09 crc kubenswrapper[4691]: E1124 07:59:09.762443 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:10 crc kubenswrapper[4691]: I1124 07:59:10.760576 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:10 crc kubenswrapper[4691]: I1124 07:59:10.760666 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:10 crc kubenswrapper[4691]: E1124 07:59:10.760806 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:10 crc kubenswrapper[4691]: E1124 07:59:10.760941 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:10 crc kubenswrapper[4691]: I1124 07:59:10.761076 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:10 crc kubenswrapper[4691]: E1124 07:59:10.761685 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:11 crc kubenswrapper[4691]: I1124 07:59:11.760211 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:11 crc kubenswrapper[4691]: E1124 07:59:11.760669 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:12 crc kubenswrapper[4691]: I1124 07:59:12.759854 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:12 crc kubenswrapper[4691]: I1124 07:59:12.759854 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:12 crc kubenswrapper[4691]: E1124 07:59:12.760059 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:12 crc kubenswrapper[4691]: E1124 07:59:12.760199 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:12 crc kubenswrapper[4691]: I1124 07:59:12.759893 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:12 crc kubenswrapper[4691]: E1124 07:59:12.760547 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:13 crc kubenswrapper[4691]: I1124 07:59:13.760580 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:13 crc kubenswrapper[4691]: E1124 07:59:13.760838 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:14 crc kubenswrapper[4691]: I1124 07:59:14.759925 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:14 crc kubenswrapper[4691]: E1124 07:59:14.760224 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:14 crc kubenswrapper[4691]: I1124 07:59:14.760630 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:14 crc kubenswrapper[4691]: E1124 07:59:14.760802 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:14 crc kubenswrapper[4691]: I1124 07:59:14.760955 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:14 crc kubenswrapper[4691]: E1124 07:59:14.761003 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:15 crc kubenswrapper[4691]: I1124 07:59:15.760230 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:15 crc kubenswrapper[4691]: E1124 07:59:15.760478 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:16 crc kubenswrapper[4691]: I1124 07:59:16.449705 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gxxrf_b2332a73-f85c-470c-9209-c5e5cd1bc3a1/kube-multus/1.log" Nov 24 07:59:16 crc kubenswrapper[4691]: I1124 07:59:16.450742 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gxxrf_b2332a73-f85c-470c-9209-c5e5cd1bc3a1/kube-multus/0.log" Nov 24 07:59:16 crc kubenswrapper[4691]: I1124 07:59:16.450832 4691 generic.go:334] "Generic (PLEG): container finished" podID="b2332a73-f85c-470c-9209-c5e5cd1bc3a1" containerID="d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa" exitCode=1 Nov 24 07:59:16 crc kubenswrapper[4691]: I1124 07:59:16.450894 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gxxrf" event={"ID":"b2332a73-f85c-470c-9209-c5e5cd1bc3a1","Type":"ContainerDied","Data":"d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa"} Nov 24 07:59:16 crc kubenswrapper[4691]: I1124 07:59:16.450995 4691 scope.go:117] "RemoveContainer" containerID="ce2c0da5c5dcfc067f52c8608b0044c149d19ae2a780fd0d4110c1cf36410703" Nov 24 07:59:16 crc kubenswrapper[4691]: I1124 07:59:16.451662 4691 scope.go:117] "RemoveContainer" containerID="d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa" Nov 24 07:59:16 crc kubenswrapper[4691]: E1124 07:59:16.452025 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-gxxrf_openshift-multus(b2332a73-f85c-470c-9209-c5e5cd1bc3a1)\"" pod="openshift-multus/multus-gxxrf" podUID="b2332a73-f85c-470c-9209-c5e5cd1bc3a1" Nov 24 07:59:16 crc kubenswrapper[4691]: I1124 07:59:16.760040 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:16 crc kubenswrapper[4691]: I1124 07:59:16.760111 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:16 crc kubenswrapper[4691]: I1124 07:59:16.760664 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:16 crc kubenswrapper[4691]: E1124 07:59:16.760859 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:16 crc kubenswrapper[4691]: E1124 07:59:16.761149 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:16 crc kubenswrapper[4691]: E1124 07:59:16.761205 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:17 crc kubenswrapper[4691]: I1124 07:59:17.456204 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gxxrf_b2332a73-f85c-470c-9209-c5e5cd1bc3a1/kube-multus/1.log" Nov 24 07:59:17 crc kubenswrapper[4691]: I1124 07:59:17.760331 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:17 crc kubenswrapper[4691]: E1124 07:59:17.760632 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:17 crc kubenswrapper[4691]: I1124 07:59:17.761899 4691 scope.go:117] "RemoveContainer" containerID="62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805" Nov 24 07:59:17 crc kubenswrapper[4691]: E1124 07:59:17.762248 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" Nov 24 07:59:18 crc kubenswrapper[4691]: E1124 07:59:18.732988 4691 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 24 07:59:18 crc kubenswrapper[4691]: I1124 07:59:18.759660 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:18 crc kubenswrapper[4691]: I1124 07:59:18.763874 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:18 crc kubenswrapper[4691]: I1124 07:59:18.763941 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:18 crc kubenswrapper[4691]: E1124 07:59:18.764109 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:18 crc kubenswrapper[4691]: E1124 07:59:18.764159 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:18 crc kubenswrapper[4691]: E1124 07:59:18.764297 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:18 crc kubenswrapper[4691]: E1124 07:59:18.872687 4691 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 07:59:19 crc kubenswrapper[4691]: I1124 07:59:19.760310 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:19 crc kubenswrapper[4691]: E1124 07:59:19.761911 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:20 crc kubenswrapper[4691]: I1124 07:59:20.760120 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:20 crc kubenswrapper[4691]: I1124 07:59:20.760120 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:20 crc kubenswrapper[4691]: E1124 07:59:20.760394 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:20 crc kubenswrapper[4691]: E1124 07:59:20.760569 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:20 crc kubenswrapper[4691]: I1124 07:59:20.760163 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:20 crc kubenswrapper[4691]: E1124 07:59:20.760752 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:21 crc kubenswrapper[4691]: I1124 07:59:21.760220 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:21 crc kubenswrapper[4691]: E1124 07:59:21.760387 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:22 crc kubenswrapper[4691]: I1124 07:59:22.760496 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:22 crc kubenswrapper[4691]: I1124 07:59:22.760521 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:22 crc kubenswrapper[4691]: I1124 07:59:22.760589 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:22 crc kubenswrapper[4691]: E1124 07:59:22.761532 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:22 crc kubenswrapper[4691]: E1124 07:59:22.761650 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:22 crc kubenswrapper[4691]: E1124 07:59:22.761750 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:23 crc kubenswrapper[4691]: I1124 07:59:23.760238 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:23 crc kubenswrapper[4691]: E1124 07:59:23.760391 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:23 crc kubenswrapper[4691]: E1124 07:59:23.875947 4691 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 07:59:24 crc kubenswrapper[4691]: I1124 07:59:24.760244 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:24 crc kubenswrapper[4691]: E1124 07:59:24.760423 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:24 crc kubenswrapper[4691]: I1124 07:59:24.760244 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:24 crc kubenswrapper[4691]: I1124 07:59:24.760246 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:24 crc kubenswrapper[4691]: E1124 07:59:24.761260 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:24 crc kubenswrapper[4691]: E1124 07:59:24.761381 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:25 crc kubenswrapper[4691]: I1124 07:59:25.760290 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:25 crc kubenswrapper[4691]: E1124 07:59:25.761705 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:26 crc kubenswrapper[4691]: I1124 07:59:26.759583 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:26 crc kubenswrapper[4691]: I1124 07:59:26.759719 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:26 crc kubenswrapper[4691]: I1124 07:59:26.759774 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:26 crc kubenswrapper[4691]: E1124 07:59:26.759919 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:26 crc kubenswrapper[4691]: E1124 07:59:26.760015 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:26 crc kubenswrapper[4691]: E1124 07:59:26.760140 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:27 crc kubenswrapper[4691]: I1124 07:59:27.759507 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:27 crc kubenswrapper[4691]: E1124 07:59:27.759688 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:28 crc kubenswrapper[4691]: I1124 07:59:28.760178 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:28 crc kubenswrapper[4691]: I1124 07:59:28.760937 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:28 crc kubenswrapper[4691]: I1124 07:59:28.762258 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:28 crc kubenswrapper[4691]: E1124 07:59:28.762430 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:28 crc kubenswrapper[4691]: E1124 07:59:28.762760 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:28 crc kubenswrapper[4691]: E1124 07:59:28.762895 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:28 crc kubenswrapper[4691]: I1124 07:59:28.762977 4691 scope.go:117] "RemoveContainer" containerID="d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa" Nov 24 07:59:28 crc kubenswrapper[4691]: E1124 07:59:28.876916 4691 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 07:59:29 crc kubenswrapper[4691]: I1124 07:59:29.503413 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gxxrf_b2332a73-f85c-470c-9209-c5e5cd1bc3a1/kube-multus/1.log" Nov 24 07:59:29 crc kubenswrapper[4691]: I1124 07:59:29.503837 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gxxrf" event={"ID":"b2332a73-f85c-470c-9209-c5e5cd1bc3a1","Type":"ContainerStarted","Data":"b88a5444a724c9be6f939634f2ae4dedc6fb1554307eb43642e1e6350e8cc201"} Nov 24 07:59:29 crc kubenswrapper[4691]: I1124 07:59:29.760288 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:29 crc kubenswrapper[4691]: E1124 07:59:29.760565 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:30 crc kubenswrapper[4691]: I1124 07:59:30.760199 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:30 crc kubenswrapper[4691]: I1124 07:59:30.760343 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:30 crc kubenswrapper[4691]: I1124 07:59:30.760422 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:30 crc kubenswrapper[4691]: E1124 07:59:30.760652 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:30 crc kubenswrapper[4691]: E1124 07:59:30.760836 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:30 crc kubenswrapper[4691]: E1124 07:59:30.761058 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:31 crc kubenswrapper[4691]: I1124 07:59:31.759865 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:31 crc kubenswrapper[4691]: E1124 07:59:31.760076 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:32 crc kubenswrapper[4691]: I1124 07:59:32.759634 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:32 crc kubenswrapper[4691]: I1124 07:59:32.759833 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:32 crc kubenswrapper[4691]: I1124 07:59:32.759877 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:32 crc kubenswrapper[4691]: E1124 07:59:32.759879 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:32 crc kubenswrapper[4691]: E1124 07:59:32.760607 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:32 crc kubenswrapper[4691]: E1124 07:59:32.760790 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:32 crc kubenswrapper[4691]: I1124 07:59:32.761407 4691 scope.go:117] "RemoveContainer" containerID="62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805" Nov 24 07:59:32 crc kubenswrapper[4691]: E1124 07:59:32.761729 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-6f24c_openshift-ovn-kubernetes(106a6e78-a004-4232-a0a2-efecf2f7c248)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" Nov 24 07:59:33 crc kubenswrapper[4691]: I1124 07:59:33.759839 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:33 crc kubenswrapper[4691]: E1124 07:59:33.760013 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:33 crc kubenswrapper[4691]: E1124 07:59:33.878568 4691 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 07:59:34 crc kubenswrapper[4691]: I1124 07:59:34.759910 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:34 crc kubenswrapper[4691]: I1124 07:59:34.759957 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:34 crc kubenswrapper[4691]: I1124 07:59:34.760041 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:34 crc kubenswrapper[4691]: E1124 07:59:34.760166 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:34 crc kubenswrapper[4691]: E1124 07:59:34.760403 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:34 crc kubenswrapper[4691]: E1124 07:59:34.760814 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:35 crc kubenswrapper[4691]: I1124 07:59:35.759608 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:35 crc kubenswrapper[4691]: E1124 07:59:35.759827 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:36 crc kubenswrapper[4691]: I1124 07:59:36.759758 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:36 crc kubenswrapper[4691]: I1124 07:59:36.759773 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:36 crc kubenswrapper[4691]: E1124 07:59:36.759961 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:36 crc kubenswrapper[4691]: I1124 07:59:36.759784 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:36 crc kubenswrapper[4691]: E1124 07:59:36.760098 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:36 crc kubenswrapper[4691]: E1124 07:59:36.760211 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:37 crc kubenswrapper[4691]: I1124 07:59:37.760435 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:37 crc kubenswrapper[4691]: E1124 07:59:37.761240 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:38 crc kubenswrapper[4691]: I1124 07:59:38.762727 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:38 crc kubenswrapper[4691]: I1124 07:59:38.762792 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:38 crc kubenswrapper[4691]: I1124 07:59:38.762851 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:38 crc kubenswrapper[4691]: E1124 07:59:38.769814 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:38 crc kubenswrapper[4691]: E1124 07:59:38.770143 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:38 crc kubenswrapper[4691]: E1124 07:59:38.770487 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:38 crc kubenswrapper[4691]: E1124 07:59:38.879186 4691 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 07:59:39 crc kubenswrapper[4691]: I1124 07:59:39.760150 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:39 crc kubenswrapper[4691]: E1124 07:59:39.760688 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:40 crc kubenswrapper[4691]: I1124 07:59:40.759851 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:40 crc kubenswrapper[4691]: I1124 07:59:40.759873 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:40 crc kubenswrapper[4691]: I1124 07:59:40.760038 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:40 crc kubenswrapper[4691]: E1124 07:59:40.760176 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:40 crc kubenswrapper[4691]: E1124 07:59:40.760780 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:40 crc kubenswrapper[4691]: E1124 07:59:40.761043 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:41 crc kubenswrapper[4691]: I1124 07:59:41.759584 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:41 crc kubenswrapper[4691]: E1124 07:59:41.759777 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:42 crc kubenswrapper[4691]: I1124 07:59:42.760480 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:42 crc kubenswrapper[4691]: E1124 07:59:42.760702 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:42 crc kubenswrapper[4691]: I1124 07:59:42.760783 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:42 crc kubenswrapper[4691]: E1124 07:59:42.760969 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:42 crc kubenswrapper[4691]: I1124 07:59:42.761049 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:42 crc kubenswrapper[4691]: E1124 07:59:42.761124 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:43 crc kubenswrapper[4691]: I1124 07:59:43.760234 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:43 crc kubenswrapper[4691]: E1124 07:59:43.760392 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:43 crc kubenswrapper[4691]: E1124 07:59:43.881337 4691 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 07:59:44 crc kubenswrapper[4691]: I1124 07:59:44.760690 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:44 crc kubenswrapper[4691]: I1124 07:59:44.760690 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:44 crc kubenswrapper[4691]: E1124 07:59:44.760881 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:44 crc kubenswrapper[4691]: E1124 07:59:44.760947 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:44 crc kubenswrapper[4691]: I1124 07:59:44.760690 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:44 crc kubenswrapper[4691]: E1124 07:59:44.761041 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:45 crc kubenswrapper[4691]: I1124 07:59:45.689475 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.689705 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:01:47.689657542 +0000 UTC m=+269.688606791 (durationBeforeRetry 2m2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 07:59:45 crc kubenswrapper[4691]: I1124 07:59:45.690256 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.690642 4691 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.690793 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 08:01:47.690756014 +0000 UTC m=+269.689705303 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 24 07:59:45 crc kubenswrapper[4691]: I1124 07:59:45.759736 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.759965 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:45 crc kubenswrapper[4691]: I1124 07:59:45.791910 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:45 crc kubenswrapper[4691]: I1124 07:59:45.791997 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:45 crc kubenswrapper[4691]: I1124 07:59:45.792043 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.792181 4691 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.792238 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.792296 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.792322 4691 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.792344 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.792301 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-24 08:01:47.792261528 +0000 UTC m=+269.791210787 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.792402 4691 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.792716 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-24 08:01:47.792463183 +0000 UTC m=+269.791412442 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.792752 4691 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:59:45 crc kubenswrapper[4691]: E1124 07:59:45.792890 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-24 08:01:47.792859125 +0000 UTC m=+269.791808374 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 24 07:59:46 crc kubenswrapper[4691]: I1124 07:59:46.760307 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:46 crc kubenswrapper[4691]: I1124 07:59:46.760315 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:46 crc kubenswrapper[4691]: I1124 07:59:46.760315 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:46 crc kubenswrapper[4691]: E1124 07:59:46.761120 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:46 crc kubenswrapper[4691]: E1124 07:59:46.761376 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:46 crc kubenswrapper[4691]: E1124 07:59:46.761619 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:46 crc kubenswrapper[4691]: I1124 07:59:46.761786 4691 scope.go:117] "RemoveContainer" containerID="62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805" Nov 24 07:59:47 crc kubenswrapper[4691]: I1124 07:59:47.574866 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/3.log" Nov 24 07:59:47 crc kubenswrapper[4691]: I1124 07:59:47.579437 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerStarted","Data":"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420"} Nov 24 07:59:47 crc kubenswrapper[4691]: I1124 07:59:47.580008 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 07:59:47 crc kubenswrapper[4691]: I1124 07:59:47.627383 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podStartSLOduration=126.627360458 podStartE2EDuration="2m6.627360458s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 07:59:47.619982693 +0000 UTC m=+149.618931962" watchObservedRunningTime="2025-11-24 07:59:47.627360458 +0000 UTC m=+149.626309707" Nov 24 07:59:47 crc kubenswrapper[4691]: I1124 07:59:47.628475 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-98whr"] Nov 24 07:59:47 crc kubenswrapper[4691]: I1124 07:59:47.628590 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:47 crc kubenswrapper[4691]: E1124 07:59:47.628690 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:47 crc kubenswrapper[4691]: I1124 07:59:47.759614 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:47 crc kubenswrapper[4691]: E1124 07:59:47.759757 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:48 crc kubenswrapper[4691]: I1124 07:59:48.760741 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:48 crc kubenswrapper[4691]: I1124 07:59:48.760915 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:48 crc kubenswrapper[4691]: E1124 07:59:48.761845 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:48 crc kubenswrapper[4691]: E1124 07:59:48.762031 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:48 crc kubenswrapper[4691]: E1124 07:59:48.882003 4691 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 24 07:59:49 crc kubenswrapper[4691]: I1124 07:59:49.759717 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:49 crc kubenswrapper[4691]: E1124 07:59:49.759899 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:49 crc kubenswrapper[4691]: I1124 07:59:49.760008 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:49 crc kubenswrapper[4691]: E1124 07:59:49.760223 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:50 crc kubenswrapper[4691]: I1124 07:59:50.760514 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:50 crc kubenswrapper[4691]: I1124 07:59:50.760554 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:50 crc kubenswrapper[4691]: E1124 07:59:50.760717 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:50 crc kubenswrapper[4691]: E1124 07:59:50.760785 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:51 crc kubenswrapper[4691]: I1124 07:59:51.090037 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 07:59:51 crc kubenswrapper[4691]: I1124 07:59:51.090100 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 07:59:51 crc kubenswrapper[4691]: I1124 07:59:51.759987 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:51 crc kubenswrapper[4691]: E1124 07:59:51.760170 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:51 crc kubenswrapper[4691]: I1124 07:59:51.759987 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:51 crc kubenswrapper[4691]: E1124 07:59:51.760775 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:52 crc kubenswrapper[4691]: I1124 07:59:52.760215 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:52 crc kubenswrapper[4691]: I1124 07:59:52.760296 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:52 crc kubenswrapper[4691]: E1124 07:59:52.760404 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 24 07:59:52 crc kubenswrapper[4691]: E1124 07:59:52.760538 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 24 07:59:53 crc kubenswrapper[4691]: I1124 07:59:53.760113 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:53 crc kubenswrapper[4691]: I1124 07:59:53.760170 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:53 crc kubenswrapper[4691]: E1124 07:59:53.760341 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-98whr" podUID="21147e4f-4335-4c12-9a81-aa333d8301db" Nov 24 07:59:53 crc kubenswrapper[4691]: E1124 07:59:53.760543 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 24 07:59:54 crc kubenswrapper[4691]: I1124 07:59:54.760557 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 07:59:54 crc kubenswrapper[4691]: I1124 07:59:54.760565 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 07:59:54 crc kubenswrapper[4691]: I1124 07:59:54.762726 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 24 07:59:54 crc kubenswrapper[4691]: I1124 07:59:54.762916 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 24 07:59:54 crc kubenswrapper[4691]: I1124 07:59:54.763770 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 24 07:59:54 crc kubenswrapper[4691]: I1124 07:59:54.764679 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 24 07:59:55 crc kubenswrapper[4691]: I1124 07:59:55.760409 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 07:59:55 crc kubenswrapper[4691]: I1124 07:59:55.760483 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 07:59:55 crc kubenswrapper[4691]: I1124 07:59:55.764030 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 24 07:59:55 crc kubenswrapper[4691]: I1124 07:59:55.764477 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.067510 4691 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.101811 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ljnvj"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.102206 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.105501 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.105928 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.106646 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.106762 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.107129 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.107155 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.107790 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.108100 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.112073 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.112722 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.113263 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-7cng4"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.113601 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7cng4" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.114291 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.114393 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.114475 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.114524 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.114627 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.114649 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.114786 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.114946 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.115086 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.115122 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-sph7r"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.115554 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.115602 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.115560 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.115618 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.115678 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.115749 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.115874 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.116061 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.116291 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.118500 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-h2rcj"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.118964 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-m8bj7"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.119382 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.126149 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.130710 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.131307 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.133908 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zw9jm"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.134712 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-h9wgf"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.135292 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.135797 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.144400 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.144913 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.151132 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-6b2ss"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.151902 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.152265 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.152299 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.152534 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.154807 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p2br"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.155298 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.155802 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.155852 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.156139 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.156151 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.155817 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.156351 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.156536 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.156583 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.156624 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.156876 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.158170 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.158318 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.158410 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.158518 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.158565 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.158594 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.158700 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.158794 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.158874 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.158927 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.159031 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.159108 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.159180 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.159289 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 24 07:59:58 crc 
kubenswrapper[4691]: I1124 07:59:58.159371 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.159481 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.159565 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.159593 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.159646 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.159567 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.159729 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.159943 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.160034 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.160110 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.160205 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.160231 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-gs8l7"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.160294 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.160526 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.160584 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.160700 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.160997 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.161688 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.164039 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.164430 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.164616 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.165281 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.165408 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6n9v7"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.165503 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.165617 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.165663 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.165857 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.165998 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.166715 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.167479 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.168009 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.168839 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.168972 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.169748 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.170008 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.170313 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.170468 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.171312 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.171633 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.171746 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.189174 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.198095 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.199080 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cbkxk"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.199936 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dbzsg"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.202985 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.204003 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.206799 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.207703 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.214617 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.214646 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.230661 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.233302 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.234165 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.234378 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.234583 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.234939 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.235137 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.235233 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.235271 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.235352 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.235505 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.235606 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.235690 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.235761 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.236432 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.236753 4691 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.236876 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.237271 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.237405 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-njfp2"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.237595 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.237853 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.238013 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.238277 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.238937 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.240368 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.241013 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.241237 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.241727 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.242037 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.242233 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.242275 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.242736 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.243845 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.244410 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.245174 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.245932 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.246064 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.246503 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.246658 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.248398 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.249152 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.251911 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.252376 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.254331 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.254968 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-7shsd"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.255461 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.255745 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.256134 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-744kk"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.258315 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.265348 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.266146 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lsqrb"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.268913 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-c8zrh"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.269323 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.269419 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399505-btzlm"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.269734 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.272365 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-22hbl"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.272586 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399505-btzlm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.273843 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.274664 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.276298 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p2br"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.278180 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ljnvj"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.279208 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.280967 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.285299 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.293169 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-h9wgf"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.294676 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.298258 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j"] Nov 24 
07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.298283 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-6b2ss"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302350 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl9kp\" (UniqueName: \"kubernetes.io/projected/0577b42c-04f7-4ebe-afda-6b968475f302-kube-api-access-sl9kp\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302468 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f29fbcd4-390b-4c16-8d35-331bf36b835a-trusted-ca\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302503 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-client-ca\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302550 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmtwh\" (UniqueName: \"kubernetes.io/projected/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-kube-api-access-vmtwh\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302575 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c182330a-7ed5-4b31-8d80-d348e821c749-auth-proxy-config\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302596 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-serving-cert\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302618 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86n2d\" (UniqueName: \"kubernetes.io/projected/1b8890e7-15c8-4467-b31b-493b565c584a-kube-api-access-86n2d\") pod \"downloads-7954f5f757-7cng4\" (UID: \"1b8890e7-15c8-4467-b31b-493b565c584a\") " pod="openshift-console/downloads-7954f5f757-7cng4" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302639 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwktd\" (UniqueName: \"kubernetes.io/projected/7dd994cd-9276-4e56-aa73-db6007804b05-kube-api-access-cwktd\") pod \"console-operator-58897d9998-zw9jm\" (UID: 
\"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302667 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xjtp\" (UniqueName: \"kubernetes.io/projected/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-kube-api-access-6xjtp\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302687 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-config\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302725 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-etcd-serving-ca\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302747 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302770 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12724cb5-e0ed-4c92-93e6-0f223dd11bea-config\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302790 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0577b42c-04f7-4ebe-afda-6b968475f302-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302809 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302827 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-dir\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302846 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7dd994cd-9276-4e56-aa73-db6007804b05-config\") pod \"console-operator-58897d9998-zw9jm\" (UID: \"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302869 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-config\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302891 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-config\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302913 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8f2n\" (UniqueName: \"kubernetes.io/projected/12724cb5-e0ed-4c92-93e6-0f223dd11bea-kube-api-access-x8f2n\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302933 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-oauth-serving-cert\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302954 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4db5010a-367e-473a-8e9e-56febfd76781-encryption-config\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302982 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a556a2eb-6c10-4dd1-ad6e-5cc4084a4497-metrics-tls\") pod \"dns-operator-744455d44c-gs8l7\" (UID: \"a556a2eb-6c10-4dd1-ad6e-5cc4084a4497\") " pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303004 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303028 4691 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-config\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303048 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksmlf\" (UniqueName: \"kubernetes.io/projected/2d405da4-b29b-4e37-8871-a7db10fdc8d5-kube-api-access-ksmlf\") pod \"openshift-apiserver-operator-796bbdcf4f-5vnfr\" (UID: \"2d405da4-b29b-4e37-8871-a7db10fdc8d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303069 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qx8cv\" (UID: \"4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303090 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/278ddf9e-14a8-43dd-820a-bfda668bbce1-available-featuregates\") pod \"openshift-config-operator-7777fb866f-sph7r\" (UID: \"278ddf9e-14a8-43dd-820a-bfda668bbce1\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303113 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d405da4-b29b-4e37-8871-a7db10fdc8d5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-5vnfr\" (UID: \"2d405da4-b29b-4e37-8871-a7db10fdc8d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303133 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0935333c-8a58-4595-806a-d765d455f44c-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-s85fr\" (UID: \"0935333c-8a58-4595-806a-d765d455f44c\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303154 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303179 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-client-ca\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303201 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303222 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f29fbcd4-390b-4c16-8d35-331bf36b835a-metrics-tls\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303244 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303267 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303287 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7dd994cd-9276-4e56-aa73-db6007804b05-serving-cert\") pod \"console-operator-58897d9998-zw9jm\" (UID: \"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303309 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/35aab505-1a5e-463c-a9af-ea76e2b866de-profile-collector-cert\") pod \"olm-operator-6b444d44fb-5m226\" (UID: \"35aab505-1a5e-463c-a9af-ea76e2b866de\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303332 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-config\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303350 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4db5010a-367e-473a-8e9e-56febfd76781-etcd-client\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " 
pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303371 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4db5010a-367e-473a-8e9e-56febfd76781-serving-cert\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303462 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhrzm\" (UniqueName: \"kubernetes.io/projected/f29fbcd4-390b-4c16-8d35-331bf36b835a-kube-api-access-lhrzm\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.302389 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.304422 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.304459 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-gs8l7"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.304482 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.303489 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-policies\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.304891 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.304918 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cbkxk\" (UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.304943 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0935333c-8a58-4595-806a-d765d455f44c-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-s85fr\" (UID: \"0935333c-8a58-4595-806a-d765d455f44c\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 
07:59:58.305021 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0577b42c-04f7-4ebe-afda-6b968475f302-etcd-client\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305048 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d32b123-7986-4bd2-abdf-b8be8c855817-serving-cert\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305175 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45qj5\" (UniqueName: \"kubernetes.io/projected/53aa0d7f-022e-46a2-9e47-442eca753bbc-kube-api-access-45qj5\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305207 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0577b42c-04f7-4ebe-afda-6b968475f302-encryption-config\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305266 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-oauth-config\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305295 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwmc8\" (UniqueName: \"kubernetes.io/projected/a556a2eb-6c10-4dd1-ad6e-5cc4084a4497-kube-api-access-nwmc8\") pod \"dns-operator-744455d44c-gs8l7\" (UID: \"a556a2eb-6c10-4dd1-ad6e-5cc4084a4497\") " pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305318 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0935333c-8a58-4595-806a-d765d455f44c-config\") pod \"kube-apiserver-operator-766d6c64bb-s85fr\" (UID: \"0935333c-8a58-4595-806a-d765d455f44c\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305349 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0577b42c-04f7-4ebe-afda-6b968475f302-serving-cert\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305371 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-fxj7w\" (UniqueName: \"kubernetes.io/projected/c182330a-7ed5-4b31-8d80-d348e821c749-kube-api-access-fxj7w\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305425 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ccck\" (UniqueName: \"kubernetes.io/projected/278ddf9e-14a8-43dd-820a-bfda668bbce1-kube-api-access-2ccck\") pod \"openshift-config-operator-7777fb866f-sph7r\" (UID: \"278ddf9e-14a8-43dd-820a-bfda668bbce1\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305441 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305472 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305509 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-image-import-ca\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305536 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0e11694-91c8-4c71-88a5-f78c1285acb6-serving-cert\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305558 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-service-ca-bundle\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305782 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-trusted-ca-bundle\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305827 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbkfn\" (UniqueName: \"kubernetes.io/projected/28d1a6e7-60cf-4233-9298-4a561b105271-kube-api-access-lbkfn\") pod \"marketplace-operator-79b997595-cbkxk\" (UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305850 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6sjm\" (UniqueName: \"kubernetes.io/projected/6d32b123-7986-4bd2-abdf-b8be8c855817-kube-api-access-b6sjm\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305894 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305938 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.305977 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306017 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/c182330a-7ed5-4b31-8d80-d348e821c749-machine-approver-tls\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306079 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-trusted-ca-bundle\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306114 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-serving-cert\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306166 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8678w\" (UniqueName: \"kubernetes.io/projected/a0e11694-91c8-4c71-88a5-f78c1285acb6-kube-api-access-8678w\") 
pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306204 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/12724cb5-e0ed-4c92-93e6-0f223dd11bea-images\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306234 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/12724cb5-e0ed-4c92-93e6-0f223dd11bea-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306255 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a0e11694-91c8-4c71-88a5-f78c1285acb6-etcd-client\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306284 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhzrj\" (UniqueName: \"kubernetes.io/projected/4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96-kube-api-access-nhzrj\") pod \"cluster-samples-operator-665b6dd947-qx8cv\" (UID: \"4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306319 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0577b42c-04f7-4ebe-afda-6b968475f302-audit-dir\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306344 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cwr6\" (UniqueName: \"kubernetes.io/projected/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-kube-api-access-7cwr6\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306365 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0e11694-91c8-4c71-88a5-f78c1285acb6-config\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306397 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53aa0d7f-022e-46a2-9e47-442eca753bbc-serving-cert\") pod \"controller-manager-879f6c89f-7p2br\" (UID: 
\"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306426 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0577b42c-04f7-4ebe-afda-6b968475f302-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306461 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c182330a-7ed5-4b31-8d80-d348e821c749-config\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306521 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306536 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/35aab505-1a5e-463c-a9af-ea76e2b866de-srv-cert\") pod \"olm-operator-6b444d44fb-5m226\" (UID: \"35aab505-1a5e-463c-a9af-ea76e2b866de\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306641 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306657 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306748 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rf5b4\" (UniqueName: \"kubernetes.io/projected/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-kube-api-access-rf5b4\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306788 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-service-ca\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306910 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" 
(UniqueName: \"kubernetes.io/host-path/4db5010a-367e-473a-8e9e-56febfd76781-audit-dir\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306928 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.306947 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmtrh\" (UniqueName: \"kubernetes.io/projected/35aab505-1a5e-463c-a9af-ea76e2b866de-kube-api-access-zmtrh\") pod \"olm-operator-6b444d44fb-5m226\" (UID: \"35aab505-1a5e-463c-a9af-ea76e2b866de\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307071 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-audit\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307086 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/a0e11694-91c8-4c71-88a5-f78c1285acb6-etcd-ca\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307101 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f29fbcd4-390b-4c16-8d35-331bf36b835a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307525 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/278ddf9e-14a8-43dd-820a-bfda668bbce1-serving-cert\") pod \"openshift-config-operator-7777fb866f-sph7r\" (UID: \"278ddf9e-14a8-43dd-820a-bfda668bbce1\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307555 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cbkxk\" (UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307572 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/0577b42c-04f7-4ebe-afda-6b968475f302-audit-policies\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307610 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/a0e11694-91c8-4c71-88a5-f78c1285acb6-etcd-service-ca\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307627 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307667 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/4db5010a-367e-473a-8e9e-56febfd76781-node-pullsecrets\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307688 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnpc7\" (UniqueName: \"kubernetes.io/projected/4db5010a-367e-473a-8e9e-56febfd76781-kube-api-access-cnpc7\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307703 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bbq6\" (UniqueName: \"kubernetes.io/projected/4e3edb7c-7fcb-4525-975e-227838552f54-kube-api-access-7bbq6\") pod \"migrator-59844c95c7-k5dmp\" (UID: \"4e3edb7c-7fcb-4525-975e-227838552f54\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307721 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d405da4-b29b-4e37-8871-a7db10fdc8d5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-5vnfr\" (UID: \"2d405da4-b29b-4e37-8871-a7db10fdc8d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307737 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7dd994cd-9276-4e56-aa73-db6007804b05-trusted-ca\") pod \"console-operator-58897d9998-zw9jm\" (UID: \"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.307944 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-sph7r"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.310329 4691 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-h2rcj"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.311782 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7cng4"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.312958 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.313948 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-m8bj7"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.314909 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dbzsg"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.316294 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.316879 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.318274 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cbkxk"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.320011 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.320041 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.321464 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.322725 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.323871 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.324770 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.325269 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zw9jm"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.326488 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6n9v7"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.329300 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lsqrb"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.329333 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399505-btzlm"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.329909 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-744kk"] Nov 24 
07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.331087 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-7shsd"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.332834 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-wgf2w"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.333843 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-wgf2w" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.334270 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-sr2d4"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.334892 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-sr2d4" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.335644 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-c8zrh"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.336502 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.337562 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-22hbl"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.338906 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-sr2d4"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.340120 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-w92ph"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.340568 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-w92ph" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.340944 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-w92ph"] Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.344185 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.364852 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.384755 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.404206 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408421 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-service-ca-bundle\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408485 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-trusted-ca-bundle\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408512 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbkfn\" (UniqueName: \"kubernetes.io/projected/28d1a6e7-60cf-4233-9298-4a561b105271-kube-api-access-lbkfn\") pod \"marketplace-operator-79b997595-cbkxk\" (UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408529 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6sjm\" (UniqueName: \"kubernetes.io/projected/6d32b123-7986-4bd2-abdf-b8be8c855817-kube-api-access-b6sjm\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408553 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408598 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408623 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-trusted-ca-bundle\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408642 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408665 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/c182330a-7ed5-4b31-8d80-d348e821c749-machine-approver-tls\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408698 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-serving-cert\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408731 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8678w\" (UniqueName: \"kubernetes.io/projected/a0e11694-91c8-4c71-88a5-f78c1285acb6-kube-api-access-8678w\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408755 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/12724cb5-e0ed-4c92-93e6-0f223dd11bea-images\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408779 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/12724cb5-e0ed-4c92-93e6-0f223dd11bea-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408797 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a0e11694-91c8-4c71-88a5-f78c1285acb6-etcd-client\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408815 4691 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-nhzrj\" (UniqueName: \"kubernetes.io/projected/4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96-kube-api-access-nhzrj\") pod \"cluster-samples-operator-665b6dd947-qx8cv\" (UID: \"4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408837 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0577b42c-04f7-4ebe-afda-6b968475f302-audit-dir\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408866 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408890 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cwr6\" (UniqueName: \"kubernetes.io/projected/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-kube-api-access-7cwr6\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408912 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0e11694-91c8-4c71-88a5-f78c1285acb6-config\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408933 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53aa0d7f-022e-46a2-9e47-442eca753bbc-serving-cert\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408955 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0577b42c-04f7-4ebe-afda-6b968475f302-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.408977 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c182330a-7ed5-4b31-8d80-d348e821c749-config\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409006 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/35aab505-1a5e-463c-a9af-ea76e2b866de-srv-cert\") pod \"olm-operator-6b444d44fb-5m226\" (UID: 
\"35aab505-1a5e-463c-a9af-ea76e2b866de\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409024 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409043 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rf5b4\" (UniqueName: \"kubernetes.io/projected/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-kube-api-access-rf5b4\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409062 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-service-ca\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409129 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-service-ca-bundle\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409175 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4db5010a-367e-473a-8e9e-56febfd76781-audit-dir\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409135 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4db5010a-367e-473a-8e9e-56febfd76781-audit-dir\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409214 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409354 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmtrh\" (UniqueName: \"kubernetes.io/projected/35aab505-1a5e-463c-a9af-ea76e2b866de-kube-api-access-zmtrh\") pod \"olm-operator-6b444d44fb-5m226\" (UID: \"35aab505-1a5e-463c-a9af-ea76e2b866de\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409389 4691 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-audit\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409411 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/a0e11694-91c8-4c71-88a5-f78c1285acb6-etcd-ca\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409428 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-trusted-ca-bundle\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409432 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f29fbcd4-390b-4c16-8d35-331bf36b835a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409480 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/278ddf9e-14a8-43dd-820a-bfda668bbce1-serving-cert\") pod \"openshift-config-operator-7777fb866f-sph7r\" (UID: \"278ddf9e-14a8-43dd-820a-bfda668bbce1\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409499 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cbkxk\" (UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409516 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0577b42c-04f7-4ebe-afda-6b968475f302-audit-policies\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409530 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/a0e11694-91c8-4c71-88a5-f78c1285acb6-etcd-service-ca\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409547 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") 
" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409594 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/4db5010a-367e-473a-8e9e-56febfd76781-node-pullsecrets\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409614 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnpc7\" (UniqueName: \"kubernetes.io/projected/4db5010a-367e-473a-8e9e-56febfd76781-kube-api-access-cnpc7\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409634 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bbq6\" (UniqueName: \"kubernetes.io/projected/4e3edb7c-7fcb-4525-975e-227838552f54-kube-api-access-7bbq6\") pod \"migrator-59844c95c7-k5dmp\" (UID: \"4e3edb7c-7fcb-4525-975e-227838552f54\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409652 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d405da4-b29b-4e37-8871-a7db10fdc8d5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-5vnfr\" (UID: \"2d405da4-b29b-4e37-8871-a7db10fdc8d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409670 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7dd994cd-9276-4e56-aa73-db6007804b05-trusted-ca\") pod \"console-operator-58897d9998-zw9jm\" (UID: \"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409686 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl9kp\" (UniqueName: \"kubernetes.io/projected/0577b42c-04f7-4ebe-afda-6b968475f302-kube-api-access-sl9kp\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409704 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f29fbcd4-390b-4c16-8d35-331bf36b835a-trusted-ca\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409754 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-client-ca\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409775 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-vmtwh\" (UniqueName: \"kubernetes.io/projected/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-kube-api-access-vmtwh\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409793 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c182330a-7ed5-4b31-8d80-d348e821c749-auth-proxy-config\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409812 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-serving-cert\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409830 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86n2d\" (UniqueName: \"kubernetes.io/projected/1b8890e7-15c8-4467-b31b-493b565c584a-kube-api-access-86n2d\") pod \"downloads-7954f5f757-7cng4\" (UID: \"1b8890e7-15c8-4467-b31b-493b565c584a\") " pod="openshift-console/downloads-7954f5f757-7cng4" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409852 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwktd\" (UniqueName: \"kubernetes.io/projected/7dd994cd-9276-4e56-aa73-db6007804b05-kube-api-access-cwktd\") pod \"console-operator-58897d9998-zw9jm\" (UID: \"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409867 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-trusted-ca-bundle\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.409880 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xjtp\" (UniqueName: \"kubernetes.io/projected/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-kube-api-access-6xjtp\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410130 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-config\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410168 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-etcd-serving-ca\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " 
pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410189 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410207 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12724cb5-e0ed-4c92-93e6-0f223dd11bea-config\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410224 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0577b42c-04f7-4ebe-afda-6b968475f302-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410240 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410258 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-config\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410274 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-config\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410289 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-dir\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410308 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7dd994cd-9276-4e56-aa73-db6007804b05-config\") pod \"console-operator-58897d9998-zw9jm\" (UID: \"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410336 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8f2n\" (UniqueName: 
\"kubernetes.io/projected/12724cb5-e0ed-4c92-93e6-0f223dd11bea-kube-api-access-x8f2n\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410352 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-oauth-serving-cert\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410367 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4db5010a-367e-473a-8e9e-56febfd76781-encryption-config\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410383 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a556a2eb-6c10-4dd1-ad6e-5cc4084a4497-metrics-tls\") pod \"dns-operator-744455d44c-gs8l7\" (UID: \"a556a2eb-6c10-4dd1-ad6e-5cc4084a4497\") " pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410399 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410420 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-config\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410438 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksmlf\" (UniqueName: \"kubernetes.io/projected/2d405da4-b29b-4e37-8871-a7db10fdc8d5-kube-api-access-ksmlf\") pod \"openshift-apiserver-operator-796bbdcf4f-5vnfr\" (UID: \"2d405da4-b29b-4e37-8871-a7db10fdc8d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410469 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qx8cv\" (UID: \"4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410488 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/278ddf9e-14a8-43dd-820a-bfda668bbce1-available-featuregates\") pod \"openshift-config-operator-7777fb866f-sph7r\" (UID: \"278ddf9e-14a8-43dd-820a-bfda668bbce1\") " 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410511 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d405da4-b29b-4e37-8871-a7db10fdc8d5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-5vnfr\" (UID: \"2d405da4-b29b-4e37-8871-a7db10fdc8d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410529 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0935333c-8a58-4595-806a-d765d455f44c-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-s85fr\" (UID: \"0935333c-8a58-4595-806a-d765d455f44c\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410545 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410589 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-client-ca\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410604 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410618 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f29fbcd4-390b-4c16-8d35-331bf36b835a-metrics-tls\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410634 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410652 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/35aab505-1a5e-463c-a9af-ea76e2b866de-profile-collector-cert\") pod \"olm-operator-6b444d44fb-5m226\" (UID: \"35aab505-1a5e-463c-a9af-ea76e2b866de\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 07:59:58 crc kubenswrapper[4691]: 
I1124 07:59:58.410697 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410713 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7dd994cd-9276-4e56-aa73-db6007804b05-serving-cert\") pod \"console-operator-58897d9998-zw9jm\" (UID: \"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410732 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-config\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410746 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4db5010a-367e-473a-8e9e-56febfd76781-etcd-client\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410763 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4db5010a-367e-473a-8e9e-56febfd76781-serving-cert\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410780 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhrzm\" (UniqueName: \"kubernetes.io/projected/f29fbcd4-390b-4c16-8d35-331bf36b835a-kube-api-access-lhrzm\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410796 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cbkxk\" (UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410812 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0935333c-8a58-4595-806a-d765d455f44c-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-s85fr\" (UID: \"0935333c-8a58-4595-806a-d765d455f44c\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410828 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0577b42c-04f7-4ebe-afda-6b968475f302-etcd-client\") pod 
\"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410845 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-policies\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410892 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410900 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410944 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d32b123-7986-4bd2-abdf-b8be8c855817-serving-cert\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410966 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45qj5\" (UniqueName: \"kubernetes.io/projected/53aa0d7f-022e-46a2-9e47-442eca753bbc-kube-api-access-45qj5\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410982 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0577b42c-04f7-4ebe-afda-6b968475f302-encryption-config\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.410999 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-oauth-config\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.411009 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7dd994cd-9276-4e56-aa73-db6007804b05-trusted-ca\") pod \"console-operator-58897d9998-zw9jm\" (UID: \"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc 
kubenswrapper[4691]: I1124 07:59:58.411016 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwmc8\" (UniqueName: \"kubernetes.io/projected/a556a2eb-6c10-4dd1-ad6e-5cc4084a4497-kube-api-access-nwmc8\") pod \"dns-operator-744455d44c-gs8l7\" (UID: \"a556a2eb-6c10-4dd1-ad6e-5cc4084a4497\") " pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.411068 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0935333c-8a58-4595-806a-d765d455f44c-config\") pod \"kube-apiserver-operator-766d6c64bb-s85fr\" (UID: \"0935333c-8a58-4595-806a-d765d455f44c\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.411115 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0577b42c-04f7-4ebe-afda-6b968475f302-serving-cert\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.411138 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxj7w\" (UniqueName: \"kubernetes.io/projected/c182330a-7ed5-4b31-8d80-d348e821c749-kube-api-access-fxj7w\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.411166 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-image-import-ca\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.411222 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0e11694-91c8-4c71-88a5-f78c1285acb6-serving-cert\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.411245 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ccck\" (UniqueName: \"kubernetes.io/projected/278ddf9e-14a8-43dd-820a-bfda668bbce1-kube-api-access-2ccck\") pod \"openshift-config-operator-7777fb866f-sph7r\" (UID: \"278ddf9e-14a8-43dd-820a-bfda668bbce1\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.411267 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.412990 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: 
\"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-audit\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.413731 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-config\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.416385 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.416655 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.416866 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.417025 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-serving-cert\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.417092 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d405da4-b29b-4e37-8871-a7db10fdc8d5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-5vnfr\" (UID: \"2d405da4-b29b-4e37-8871-a7db10fdc8d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.417743 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0577b42c-04f7-4ebe-afda-6b968475f302-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.418430 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0577b42c-04f7-4ebe-afda-6b968475f302-audit-policies\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.419128 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-oauth-serving-cert\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.419164 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/278ddf9e-14a8-43dd-820a-bfda668bbce1-serving-cert\") pod \"openshift-config-operator-7777fb866f-sph7r\" (UID: \"278ddf9e-14a8-43dd-820a-bfda668bbce1\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.419562 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.419830 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-client-ca\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.420047 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f29fbcd4-390b-4c16-8d35-331bf36b835a-trusted-ca\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.420620 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.420770 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-client-ca\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.421589 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c182330a-7ed5-4b31-8d80-d348e821c749-auth-proxy-config\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.422137 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-policies\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.422510 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qx8cv\" (UID: \"4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.422821 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-config\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.422848 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/278ddf9e-14a8-43dd-820a-bfda668bbce1-available-featuregates\") pod \"openshift-config-operator-7777fb866f-sph7r\" (UID: \"278ddf9e-14a8-43dd-820a-bfda668bbce1\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.423154 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.423389 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.423503 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f29fbcd4-390b-4c16-8d35-331bf36b835a-metrics-tls\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.423840 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d405da4-b29b-4e37-8871-a7db10fdc8d5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-5vnfr\" (UID: \"2d405da4-b29b-4e37-8871-a7db10fdc8d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.424116 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-config\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " 
pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.424707 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0577b42c-04f7-4ebe-afda-6b968475f302-audit-dir\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.425009 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/4db5010a-367e-473a-8e9e-56febfd76781-node-pullsecrets\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.425486 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/12724cb5-e0ed-4c92-93e6-0f223dd11bea-images\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.425399 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-dir\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.426825 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/c182330a-7ed5-4b31-8d80-d348e821c749-machine-approver-tls\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.427162 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7dd994cd-9276-4e56-aa73-db6007804b05-config\") pod \"console-operator-58897d9998-zw9jm\" (UID: \"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.427658 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.427665 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7dd994cd-9276-4e56-aa73-db6007804b05-serving-cert\") pod \"console-operator-58897d9998-zw9jm\" (UID: \"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.427848 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ljnvj\" 
(UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.428109 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.428474 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.428543 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.428665 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-config\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.429695 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0577b42c-04f7-4ebe-afda-6b968475f302-etcd-client\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.430270 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12724cb5-e0ed-4c92-93e6-0f223dd11bea-config\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.430327 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.430487 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c182330a-7ed5-4b31-8d80-d348e821c749-config\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.430705 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0577b42c-04f7-4ebe-afda-6b968475f302-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.430855 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/12724cb5-e0ed-4c92-93e6-0f223dd11bea-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.430880 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-service-ca\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.430981 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a0e11694-91c8-4c71-88a5-f78c1285acb6-etcd-client\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.431307 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-serving-cert\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.431691 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4db5010a-367e-473a-8e9e-56febfd76781-etcd-client\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.431985 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-etcd-serving-ca\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.421606 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4db5010a-367e-473a-8e9e-56febfd76781-encryption-config\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.432266 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4db5010a-367e-473a-8e9e-56febfd76781-serving-cert\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.432378 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/4db5010a-367e-473a-8e9e-56febfd76781-image-import-ca\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " 
pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.432413 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53aa0d7f-022e-46a2-9e47-442eca753bbc-serving-cert\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.432397 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-oauth-config\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.433244 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-config\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.434196 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0577b42c-04f7-4ebe-afda-6b968475f302-encryption-config\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.434765 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a556a2eb-6c10-4dd1-ad6e-5cc4084a4497-metrics-tls\") pod \"dns-operator-744455d44c-gs8l7\" (UID: \"a556a2eb-6c10-4dd1-ad6e-5cc4084a4497\") " pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.434967 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0e11694-91c8-4c71-88a5-f78c1285acb6-serving-cert\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.435044 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d32b123-7986-4bd2-abdf-b8be8c855817-serving-cert\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.435655 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.436160 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0577b42c-04f7-4ebe-afda-6b968475f302-serving-cert\") pod 
\"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.437333 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0e11694-91c8-4c71-88a5-f78c1285acb6-config\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.438575 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.449108 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.451095 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/a0e11694-91c8-4c71-88a5-f78c1285acb6-etcd-ca\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.464861 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.467936 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/a0e11694-91c8-4c71-88a5-f78c1285acb6-etcd-service-ca\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.485420 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.505027 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.525666 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.544780 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.565039 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.585435 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.601136 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/0935333c-8a58-4595-806a-d765d455f44c-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-s85fr\" (UID: \"0935333c-8a58-4595-806a-d765d455f44c\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.605136 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.606332 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0935333c-8a58-4595-806a-d765d455f44c-config\") pod \"kube-apiserver-operator-766d6c64bb-s85fr\" (UID: \"0935333c-8a58-4595-806a-d765d455f44c\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.625254 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.647806 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.665637 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.673236 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/35aab505-1a5e-463c-a9af-ea76e2b866de-srv-cert\") pod \"olm-operator-6b444d44fb-5m226\" (UID: \"35aab505-1a5e-463c-a9af-ea76e2b866de\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.685070 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.704330 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.716864 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/35aab505-1a5e-463c-a9af-ea76e2b866de-profile-collector-cert\") pod \"olm-operator-6b444d44fb-5m226\" (UID: \"35aab505-1a5e-463c-a9af-ea76e2b866de\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.725216 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.744805 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.766599 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.771571 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cbkxk\" 
(UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.790326 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.799343 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cbkxk\" (UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.804275 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.844999 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.864746 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.884772 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.904897 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.924265 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.945765 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.965389 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 24 07:59:58 crc kubenswrapper[4691]: I1124 07:59:58.985044 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.005877 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.025414 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.045176 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.065592 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.084790 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.105292 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 24 
07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.126415 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.145078 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.164713 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.185008 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.204693 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.224384 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.243933 4691 request.go:700] Waited for 1.00122916s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.245361 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.264347 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.284901 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.304228 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.325165 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.345031 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.364773 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.384968 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.405108 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.425094 4691 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.445364 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.465334 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.485620 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.504744 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.525623 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.545094 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.565106 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.584079 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.605217 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.624786 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.644746 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.665574 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.684836 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.705186 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.725152 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.744694 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.765913 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.785363 4691 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.805316 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.824427 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.844496 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.874931 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.885539 4691 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.905593 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.924589 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.965478 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 24 07:59:59 crc kubenswrapper[4691]: I1124 07:59:59.985411 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.005018 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.024376 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.044337 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.064786 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.085355 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.105090 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.117600 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.124743 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.124998 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399505-btzlm"] Nov 24 08:00:00 crc kubenswrapper[4691]: E1124 08:00:00.125318 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config-volume 
kube-api-access-xcm26 secret-volume], unattached volumes=[], failed to process volumes=[config-volume kube-api-access-xcm26 secret-volume]: context canceled" pod="openshift-operator-lifecycle-manager/collect-profiles-29399505-btzlm" podUID="6b5b763e-263b-49e8-80dd-6be14733ef80" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.139279 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.140054 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.145982 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.147368 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.182169 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbkfn\" (UniqueName: \"kubernetes.io/projected/28d1a6e7-60cf-4233-9298-4a561b105271-kube-api-access-lbkfn\") pod \"marketplace-operator-79b997595-cbkxk\" (UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.198778 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.205086 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6sjm\" (UniqueName: \"kubernetes.io/projected/6d32b123-7986-4bd2-abdf-b8be8c855817-kube-api-access-b6sjm\") pod \"route-controller-manager-6576b87f9c-zxfcj\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.225517 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f29fbcd4-390b-4c16-8d35-331bf36b835a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.240330 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xjtp\" (UniqueName: \"kubernetes.io/projected/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-kube-api-access-6xjtp\") pod \"console-f9d7485db-h9wgf\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.259095 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwmc8\" (UniqueName: \"kubernetes.io/projected/a556a2eb-6c10-4dd1-ad6e-5cc4084a4497-kube-api-access-nwmc8\") pod \"dns-operator-744455d44c-gs8l7\" (UID: \"a556a2eb-6c10-4dd1-ad6e-5cc4084a4497\") " pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.262976 4691 request.go:700] Waited for 1.851813698s due to client-side throttling, not priority and fairness, request: 
POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/serviceaccounts/olm-operator-serviceaccount/token Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.263080 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.279972 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmtrh\" (UniqueName: \"kubernetes.io/projected/35aab505-1a5e-463c-a9af-ea76e2b866de-kube-api-access-zmtrh\") pod \"olm-operator-6b444d44fb-5m226\" (UID: \"35aab505-1a5e-463c-a9af-ea76e2b866de\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.301650 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl9kp\" (UniqueName: \"kubernetes.io/projected/0577b42c-04f7-4ebe-afda-6b968475f302-kube-api-access-sl9kp\") pod \"apiserver-7bbb656c7d-m459v\" (UID: \"0577b42c-04f7-4ebe-afda-6b968475f302\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.318484 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ccck\" (UniqueName: \"kubernetes.io/projected/278ddf9e-14a8-43dd-820a-bfda668bbce1-kube-api-access-2ccck\") pod \"openshift-config-operator-7777fb866f-sph7r\" (UID: \"278ddf9e-14a8-43dd-820a-bfda668bbce1\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.341522 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksmlf\" (UniqueName: \"kubernetes.io/projected/2d405da4-b29b-4e37-8871-a7db10fdc8d5-kube-api-access-ksmlf\") pod \"openshift-apiserver-operator-796bbdcf4f-5vnfr\" (UID: \"2d405da4-b29b-4e37-8871-a7db10fdc8d5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.355764 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.358717 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmtwh\" (UniqueName: \"kubernetes.io/projected/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-kube-api-access-vmtwh\") pod \"oauth-openshift-558db77b4-h2rcj\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") " pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.374888 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.383736 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cbkxk"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.384015 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0935333c-8a58-4595-806a-d765d455f44c-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-s85fr\" (UID: \"0935333c-8a58-4595-806a-d765d455f44c\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.396506 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.401100 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhzrj\" (UniqueName: \"kubernetes.io/projected/4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96-kube-api-access-nhzrj\") pod \"cluster-samples-operator-665b6dd947-qx8cv\" (UID: \"4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.408388 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.420565 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.421648 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86n2d\" (UniqueName: \"kubernetes.io/projected/1b8890e7-15c8-4467-b31b-493b565c584a-kube-api-access-86n2d\") pod \"downloads-7954f5f757-7cng4\" (UID: \"1b8890e7-15c8-4467-b31b-493b565c584a\") " pod="openshift-console/downloads-7954f5f757-7cng4" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.424131 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.445778 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxj7w\" (UniqueName: \"kubernetes.io/projected/c182330a-7ed5-4b31-8d80-d348e821c749-kube-api-access-fxj7w\") pod \"machine-approver-56656f9798-fc99x\" (UID: \"c182330a-7ed5-4b31-8d80-d348e821c749\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.459198 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.469985 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.475202 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwktd\" (UniqueName: \"kubernetes.io/projected/7dd994cd-9276-4e56-aa73-db6007804b05-kube-api-access-cwktd\") pod \"console-operator-58897d9998-zw9jm\" (UID: \"7dd994cd-9276-4e56-aa73-db6007804b05\") " pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.480035 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnpc7\" (UniqueName: \"kubernetes.io/projected/4db5010a-367e-473a-8e9e-56febfd76781-kube-api-access-cnpc7\") pod \"apiserver-76f77b778f-6b2ss\" (UID: \"4db5010a-367e-473a-8e9e-56febfd76781\") " pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.486103 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.492696 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.509932 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bbq6\" (UniqueName: \"kubernetes.io/projected/4e3edb7c-7fcb-4525-975e-227838552f54-kube-api-access-7bbq6\") pod \"migrator-59844c95c7-k5dmp\" (UID: \"4e3edb7c-7fcb-4525-975e-227838552f54\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.520245 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhrzm\" (UniqueName: \"kubernetes.io/projected/f29fbcd4-390b-4c16-8d35-331bf36b835a-kube-api-access-lhrzm\") pod \"ingress-operator-5b745b69d9-mdm27\" (UID: \"f29fbcd4-390b-4c16-8d35-331bf36b835a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.546326 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cwr6\" (UniqueName: \"kubernetes.io/projected/a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b-kube-api-access-7cwr6\") pod \"authentication-operator-69f744f599-ljnvj\" (UID: \"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.555308 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.560100 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8f2n\" (UniqueName: \"kubernetes.io/projected/12724cb5-e0ed-4c92-93e6-0f223dd11bea-kube-api-access-x8f2n\") pod \"machine-api-operator-5694c8668f-m8bj7\" (UID: \"12724cb5-e0ed-4c92-93e6-0f223dd11bea\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.560126 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.579694 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.585514 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8678w\" (UniqueName: \"kubernetes.io/projected/a0e11694-91c8-4c71-88a5-f78c1285acb6-kube-api-access-8678w\") pod \"etcd-operator-b45778765-6n9v7\" (UID: \"a0e11694-91c8-4c71-88a5-f78c1285acb6\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.604355 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-sph7r"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.614795 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rf5b4\" (UniqueName: \"kubernetes.io/projected/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-kube-api-access-rf5b4\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.631155 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b7afb5cd-ee13-48a2-9d6f-14ce967d98fe-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-rs5qs\" (UID: \"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.641622 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" event={"ID":"0577b42c-04f7-4ebe-afda-6b968475f302","Type":"ContainerStarted","Data":"4113498c5991e47db673af1ac75e35149b9177adec7ba8ee165800f090bf0569"} Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.645364 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" event={"ID":"28d1a6e7-60cf-4233-9298-4a561b105271","Type":"ContainerStarted","Data":"3fad668274b2906cb8bc8f84b7d0b0ef3e5fc19797c064fd2e48ef1cdeec7bf4"} Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.645438 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" event={"ID":"28d1a6e7-60cf-4233-9298-4a561b105271","Type":"ContainerStarted","Data":"73f4c8fee0abea023921be454c9d0e67beff15e409cf6c67272177e02fcddd9c"} Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.645477 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.646131 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45qj5\" (UniqueName: \"kubernetes.io/projected/53aa0d7f-022e-46a2-9e47-442eca753bbc-kube-api-access-45qj5\") pod \"controller-manager-879f6c89f-7p2br\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") " pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.647382 4691 patch_prober.go:28] interesting 
pod/marketplace-operator-79b997595-cbkxk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.647422 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" podUID="28d1a6e7-60cf-4233-9298-4a561b105271" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.663126 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" event={"ID":"6d32b123-7986-4bd2-abdf-b8be8c855817","Type":"ContainerStarted","Data":"deb8ba6928cbb20c87380a660cddedb7530cce603e31132aecb1352fceadb165"} Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.663175 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399505-btzlm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.675321 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7cng4" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.682603 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-h2rcj"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.686762 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.700200 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399505-btzlm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.716635 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.732137 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.735926 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.745181 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.754940 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.755880 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9c8dc88a-6052-4b16-90e4-7377d8d6969d-srv-cert\") pod \"catalog-operator-68c6474976-vq867\" (UID: \"9c8dc88a-6052-4b16-90e4-7377d8d6969d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.755946 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.755980 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-trusted-ca\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756057 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd92380d-9848-49d8-9feb-07b71c7729bb-apiservice-cert\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756137 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85d176b6-4ccf-4790-b1c6-08831a67e03c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9znlm\" (UID: \"85d176b6-4ccf-4790-b1c6-08831a67e03c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756197 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fc1223e-8b42-43c4-9862-41c6026db6de-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rnx8j\" (UID: \"2fc1223e-8b42-43c4-9862-41c6026db6de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756244 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4149c672-979a-4602-9f51-d0718d65d99a-auth-proxy-config\") pod \"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756269 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85d176b6-4ccf-4790-b1c6-08831a67e03c-config\") pod 
\"kube-controller-manager-operator-78b949d7b-9znlm\" (UID: \"85d176b6-4ccf-4790-b1c6-08831a67e03c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756292 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l9rd\" (UniqueName: \"kubernetes.io/projected/a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45-kube-api-access-6l9rd\") pod \"multus-admission-controller-857f4d67dd-lsqrb\" (UID: \"a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756327 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-bound-sa-token\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756348 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4149c672-979a-4602-9f51-d0718d65d99a-proxy-tls\") pod \"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 08:00:00 crc kubenswrapper[4691]: E1124 08:00:00.756413 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:01.256395862 +0000 UTC m=+163.255345111 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756492 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cde6026c-736d-47f2-ab64-deb47de62820-ca-trust-extracted\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756519 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfrh7\" (UniqueName: \"kubernetes.io/projected/9c8dc88a-6052-4b16-90e4-7377d8d6969d-kube-api-access-bfrh7\") pod \"catalog-operator-68c6474976-vq867\" (UID: \"9c8dc88a-6052-4b16-90e4-7377d8d6969d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756570 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-csi-data-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.756600 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6cb2d489-7d74-446f-9173-7bc5f2ff32c4-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-7rgnp\" (UID: \"6cb2d489-7d74-446f-9173-7bc5f2ff32c4\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.757223 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nn5m\" (UniqueName: \"kubernetes.io/projected/4149c672-979a-4602-9f51-d0718d65d99a-kube-api-access-2nn5m\") pod \"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.757255 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2fc1223e-8b42-43c4-9862-41c6026db6de-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rnx8j\" (UID: \"2fc1223e-8b42-43c4-9862-41c6026db6de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.757277 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7ff52e52-aab5-4850-9d4d-4f427689c82b-metrics-certs\") pod \"router-default-5444994796-njfp2\" (UID: 
\"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.758687 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9c8dc88a-6052-4b16-90e4-7377d8d6969d-profile-collector-cert\") pod \"catalog-operator-68c6474976-vq867\" (UID: \"9c8dc88a-6052-4b16-90e4-7377d8d6969d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.758725 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bd92380d-9848-49d8-9feb-07b71c7729bb-tmpfs\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.758755 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd92380d-9848-49d8-9feb-07b71c7729bb-webhook-cert\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.758782 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f6239ed-b85f-4648-b4bf-aedce4312e26-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wn248\" (UID: \"4f6239ed-b85f-4648-b4bf-aedce4312e26\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.758835 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4149c672-979a-4602-9f51-d0718d65d99a-images\") pod \"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.759147 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d61589bf-88a6-4d18-97c8-b59460323cca-config\") pod \"service-ca-operator-777779d784-7shsd\" (UID: \"d61589bf-88a6-4d18-97c8-b59460323cca\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.759344 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d7748b2b-46c9-4709-bb46-545d8209bb5f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4npq9\" (UID: \"d7748b2b-46c9-4709-bb46-545d8209bb5f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.759418 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-plugins-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: 
\"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.759556 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/85d176b6-4ccf-4790-b1c6-08831a67e03c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9znlm\" (UID: \"85d176b6-4ccf-4790-b1c6-08831a67e03c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.759611 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-registry-certificates\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.759769 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7msrl\" (UniqueName: \"kubernetes.io/projected/d61589bf-88a6-4d18-97c8-b59460323cca-kube-api-access-7msrl\") pod \"service-ca-operator-777779d784-7shsd\" (UID: \"d61589bf-88a6-4d18-97c8-b59460323cca\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.759849 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84hrr\" (UniqueName: \"kubernetes.io/projected/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-kube-api-access-84hrr\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.759873 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-registration-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.759893 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjlc8\" (UniqueName: \"kubernetes.io/projected/7ff52e52-aab5-4850-9d4d-4f427689c82b-kube-api-access-pjlc8\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.759926 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/472a00a0-2e80-44f7-8857-348d3d88ab01-signing-key\") pod \"service-ca-9c57cc56f-c8zrh\" (UID: \"472a00a0-2e80-44f7-8857-348d3d88ab01\") " pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.759985 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-registry-tls\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.760009 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbr2p\" (UniqueName: \"kubernetes.io/projected/d7748b2b-46c9-4709-bb46-545d8209bb5f-kube-api-access-lbr2p\") pod \"control-plane-machine-set-operator-78cbb6b69f-4npq9\" (UID: \"d7748b2b-46c9-4709-bb46-545d8209bb5f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.760030 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cb2d489-7d74-446f-9173-7bc5f2ff32c4-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-7rgnp\" (UID: \"6cb2d489-7d74-446f-9173-7bc5f2ff32c4\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.760130 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/5ccb619a-2f5c-4b42-9dbc-00479b290b3a-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-tqr2v\" (UID: \"5ccb619a-2f5c-4b42-9dbc-00479b290b3a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.760149 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/472a00a0-2e80-44f7-8857-348d3d88ab01-signing-cabundle\") pod \"service-ca-9c57cc56f-c8zrh\" (UID: \"472a00a0-2e80-44f7-8857-348d3d88ab01\") " pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.760203 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-659hv\" (UniqueName: \"kubernetes.io/projected/5ccb619a-2f5c-4b42-9dbc-00479b290b3a-kube-api-access-659hv\") pod \"package-server-manager-789f6589d5-tqr2v\" (UID: \"5ccb619a-2f5c-4b42-9dbc-00479b290b3a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.760362 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcvjk\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-kube-api-access-dcvjk\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.760421 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f6239ed-b85f-4648-b4bf-aedce4312e26-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wn248\" (UID: \"4f6239ed-b85f-4648-b4bf-aedce4312e26\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.762419 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dvd2\" (UniqueName: 
\"kubernetes.io/projected/2fc1223e-8b42-43c4-9862-41c6026db6de-kube-api-access-5dvd2\") pod \"kube-storage-version-migrator-operator-b67b599dd-rnx8j\" (UID: \"2fc1223e-8b42-43c4-9862-41c6026db6de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.762540 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f6239ed-b85f-4648-b4bf-aedce4312e26-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wn248\" (UID: \"4f6239ed-b85f-4648-b4bf-aedce4312e26\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.762563 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/7ff52e52-aab5-4850-9d4d-4f427689c82b-default-certificate\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.762644 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cde6026c-736d-47f2-ab64-deb47de62820-installation-pull-secrets\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.763923 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-socket-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.763994 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k88kb\" (UniqueName: \"kubernetes.io/projected/472a00a0-2e80-44f7-8857-348d3d88ab01-kube-api-access-k88kb\") pod \"service-ca-9c57cc56f-c8zrh\" (UID: \"472a00a0-2e80-44f7-8857-348d3d88ab01\") " pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.764033 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7ff52e52-aab5-4850-9d4d-4f427689c82b-service-ca-bundle\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.764083 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d61589bf-88a6-4d18-97c8-b59460323cca-serving-cert\") pod \"service-ca-operator-777779d784-7shsd\" (UID: \"d61589bf-88a6-4d18-97c8-b59460323cca\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.764113 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" 
(UniqueName: \"kubernetes.io/configmap/c592727d-7307-468a-843f-01717f868d4e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-wrdd6\" (UID: \"c592727d-7307-468a-843f-01717f868d4e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.764176 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n44t6\" (UniqueName: \"kubernetes.io/projected/c592727d-7307-468a-843f-01717f868d4e-kube-api-access-n44t6\") pod \"machine-config-controller-84d6567774-wrdd6\" (UID: \"c592727d-7307-468a-843f-01717f868d4e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.764196 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8fgb\" (UniqueName: \"kubernetes.io/projected/6cb2d489-7d74-446f-9173-7bc5f2ff32c4-kube-api-access-x8fgb\") pod \"openshift-controller-manager-operator-756b6f6bc6-7rgnp\" (UID: \"6cb2d489-7d74-446f-9173-7bc5f2ff32c4\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.764215 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lsqrb\" (UID: \"a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.764234 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr2cb\" (UniqueName: \"kubernetes.io/projected/bd92380d-9848-49d8-9feb-07b71c7729bb-kube-api-access-xr2cb\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.764250 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c592727d-7307-468a-843f-01717f868d4e-proxy-tls\") pod \"machine-config-controller-84d6567774-wrdd6\" (UID: \"c592727d-7307-468a-843f-01717f868d4e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.764264 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/7ff52e52-aab5-4850-9d4d-4f427689c82b-stats-auth\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.764297 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-mountpoint-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.772635 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.778490 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.819708 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.839611 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.866864 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867216 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-registration-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867245 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjlc8\" (UniqueName: \"kubernetes.io/projected/7ff52e52-aab5-4850-9d4d-4f427689c82b-kube-api-access-pjlc8\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867266 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skgjb\" (UniqueName: \"kubernetes.io/projected/72c587e8-bc91-4e8f-a545-e01d47139d1d-kube-api-access-skgjb\") pod \"collect-profiles-29399520-x8l49\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867282 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/472a00a0-2e80-44f7-8857-348d3d88ab01-signing-key\") pod \"service-ca-9c57cc56f-c8zrh\" (UID: \"472a00a0-2e80-44f7-8857-348d3d88ab01\") " pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867300 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-registry-tls\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867315 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbr2p\" (UniqueName: \"kubernetes.io/projected/d7748b2b-46c9-4709-bb46-545d8209bb5f-kube-api-access-lbr2p\") pod \"control-plane-machine-set-operator-78cbb6b69f-4npq9\" (UID: \"d7748b2b-46c9-4709-bb46-545d8209bb5f\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867342 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cb2d489-7d74-446f-9173-7bc5f2ff32c4-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-7rgnp\" (UID: \"6cb2d489-7d74-446f-9173-7bc5f2ff32c4\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867378 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/5ccb619a-2f5c-4b42-9dbc-00479b290b3a-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-tqr2v\" (UID: \"5ccb619a-2f5c-4b42-9dbc-00479b290b3a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867392 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/472a00a0-2e80-44f7-8857-348d3d88ab01-signing-cabundle\") pod \"service-ca-9c57cc56f-c8zrh\" (UID: \"472a00a0-2e80-44f7-8857-348d3d88ab01\") " pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867411 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-659hv\" (UniqueName: \"kubernetes.io/projected/5ccb619a-2f5c-4b42-9dbc-00479b290b3a-kube-api-access-659hv\") pod \"package-server-manager-789f6589d5-tqr2v\" (UID: \"5ccb619a-2f5c-4b42-9dbc-00479b290b3a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867432 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k84z8\" (UniqueName: \"kubernetes.io/projected/a53c972e-9ea8-4b23-b43b-e432f037faec-kube-api-access-k84z8\") pod \"dns-default-sr2d4\" (UID: \"a53c972e-9ea8-4b23-b43b-e432f037faec\") " pod="openshift-dns/dns-default-sr2d4" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867469 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcvjk\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-kube-api-access-dcvjk\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867486 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f6239ed-b85f-4648-b4bf-aedce4312e26-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wn248\" (UID: \"4f6239ed-b85f-4648-b4bf-aedce4312e26\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867504 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dvd2\" (UniqueName: \"kubernetes.io/projected/2fc1223e-8b42-43c4-9862-41c6026db6de-kube-api-access-5dvd2\") pod \"kube-storage-version-migrator-operator-b67b599dd-rnx8j\" (UID: \"2fc1223e-8b42-43c4-9862-41c6026db6de\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867523 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f6239ed-b85f-4648-b4bf-aedce4312e26-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wn248\" (UID: \"4f6239ed-b85f-4648-b4bf-aedce4312e26\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867537 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/7ff52e52-aab5-4850-9d4d-4f427689c82b-default-certificate\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867556 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cde6026c-736d-47f2-ab64-deb47de62820-installation-pull-secrets\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867580 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-socket-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867598 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k88kb\" (UniqueName: \"kubernetes.io/projected/472a00a0-2e80-44f7-8857-348d3d88ab01-kube-api-access-k88kb\") pod \"service-ca-9c57cc56f-c8zrh\" (UID: \"472a00a0-2e80-44f7-8857-348d3d88ab01\") " pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867612 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7ff52e52-aab5-4850-9d4d-4f427689c82b-service-ca-bundle\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867627 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d61589bf-88a6-4d18-97c8-b59460323cca-serving-cert\") pod \"service-ca-operator-777779d784-7shsd\" (UID: \"d61589bf-88a6-4d18-97c8-b59460323cca\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867648 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c592727d-7307-468a-843f-01717f868d4e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-wrdd6\" (UID: \"c592727d-7307-468a-843f-01717f868d4e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867668 4691 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n44t6\" (UniqueName: \"kubernetes.io/projected/c592727d-7307-468a-843f-01717f868d4e-kube-api-access-n44t6\") pod \"machine-config-controller-84d6567774-wrdd6\" (UID: \"c592727d-7307-468a-843f-01717f868d4e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867689 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8fgb\" (UniqueName: \"kubernetes.io/projected/6cb2d489-7d74-446f-9173-7bc5f2ff32c4-kube-api-access-x8fgb\") pod \"openshift-controller-manager-operator-756b6f6bc6-7rgnp\" (UID: \"6cb2d489-7d74-446f-9173-7bc5f2ff32c4\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867706 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lsqrb\" (UID: \"a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867730 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr2cb\" (UniqueName: \"kubernetes.io/projected/bd92380d-9848-49d8-9feb-07b71c7729bb-kube-api-access-xr2cb\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867748 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c592727d-7307-468a-843f-01717f868d4e-proxy-tls\") pod \"machine-config-controller-84d6567774-wrdd6\" (UID: \"c592727d-7307-468a-843f-01717f868d4e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867764 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/7ff52e52-aab5-4850-9d4d-4f427689c82b-stats-auth\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867779 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-mountpoint-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867796 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d448c94d-0fa0-42d9-ac9a-eccf42ecc43a-certs\") pod \"machine-config-server-wgf2w\" (UID: \"d448c94d-0fa0-42d9-ac9a-eccf42ecc43a\") " pod="openshift-machine-config-operator/machine-config-server-wgf2w" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867813 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/9c8dc88a-6052-4b16-90e4-7377d8d6969d-srv-cert\") pod \"catalog-operator-68c6474976-vq867\" (UID: \"9c8dc88a-6052-4b16-90e4-7377d8d6969d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867843 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-trusted-ca\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867870 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75xfj\" (UniqueName: \"kubernetes.io/projected/843d6937-3e7e-4d0f-b8a7-163d2cf658eb-kube-api-access-75xfj\") pod \"ingress-canary-w92ph\" (UID: \"843d6937-3e7e-4d0f-b8a7-163d2cf658eb\") " pod="openshift-ingress-canary/ingress-canary-w92ph" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867897 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd92380d-9848-49d8-9feb-07b71c7729bb-apiservice-cert\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867920 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85d176b6-4ccf-4790-b1c6-08831a67e03c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9znlm\" (UID: \"85d176b6-4ccf-4790-b1c6-08831a67e03c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867943 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fc1223e-8b42-43c4-9862-41c6026db6de-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rnx8j\" (UID: \"2fc1223e-8b42-43c4-9862-41c6026db6de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867968 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4149c672-979a-4602-9f51-d0718d65d99a-auth-proxy-config\") pod \"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.867984 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85d176b6-4ccf-4790-b1c6-08831a67e03c-config\") pod \"kube-controller-manager-operator-78b949d7b-9znlm\" (UID: \"85d176b6-4ccf-4790-b1c6-08831a67e03c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.868002 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l9rd\" (UniqueName: 
\"kubernetes.io/projected/a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45-kube-api-access-6l9rd\") pod \"multus-admission-controller-857f4d67dd-lsqrb\" (UID: \"a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.868016 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-bound-sa-token\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.868033 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4149c672-979a-4602-9f51-d0718d65d99a-proxy-tls\") pod \"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.868048 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cde6026c-736d-47f2-ab64-deb47de62820-ca-trust-extracted\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.869012 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c592727d-7307-468a-843f-01717f868d4e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-wrdd6\" (UID: \"c592727d-7307-468a-843f-01717f868d4e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.869267 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-registration-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.869885 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-trusted-ca\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.871563 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-socket-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: E1124 08:00:00.871920 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:01.371900833 +0000 UTC m=+163.370850082 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.871970 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-mountpoint-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.868064 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcw7x\" (UniqueName: \"kubernetes.io/projected/d448c94d-0fa0-42d9-ac9a-eccf42ecc43a-kube-api-access-vcw7x\") pod \"machine-config-server-wgf2w\" (UID: \"d448c94d-0fa0-42d9-ac9a-eccf42ecc43a\") " pod="openshift-machine-config-operator/machine-config-server-wgf2w" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872103 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfrh7\" (UniqueName: \"kubernetes.io/projected/9c8dc88a-6052-4b16-90e4-7377d8d6969d-kube-api-access-bfrh7\") pod \"catalog-operator-68c6474976-vq867\" (UID: \"9c8dc88a-6052-4b16-90e4-7377d8d6969d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872137 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72c587e8-bc91-4e8f-a545-e01d47139d1d-config-volume\") pod \"collect-profiles-29399520-x8l49\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872166 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d448c94d-0fa0-42d9-ac9a-eccf42ecc43a-node-bootstrap-token\") pod \"machine-config-server-wgf2w\" (UID: \"d448c94d-0fa0-42d9-ac9a-eccf42ecc43a\") " pod="openshift-machine-config-operator/machine-config-server-wgf2w" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872204 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a53c972e-9ea8-4b23-b43b-e432f037faec-config-volume\") pod \"dns-default-sr2d4\" (UID: \"a53c972e-9ea8-4b23-b43b-e432f037faec\") " pod="openshift-dns/dns-default-sr2d4" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872229 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-csi-data-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872261 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/6cb2d489-7d74-446f-9173-7bc5f2ff32c4-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-7rgnp\" (UID: \"6cb2d489-7d74-446f-9173-7bc5f2ff32c4\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872286 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nn5m\" (UniqueName: \"kubernetes.io/projected/4149c672-979a-4602-9f51-d0718d65d99a-kube-api-access-2nn5m\") pod \"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872304 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2fc1223e-8b42-43c4-9862-41c6026db6de-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rnx8j\" (UID: \"2fc1223e-8b42-43c4-9862-41c6026db6de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872325 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7ff52e52-aab5-4850-9d4d-4f427689c82b-metrics-certs\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872344 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9c8dc88a-6052-4b16-90e4-7377d8d6969d-profile-collector-cert\") pod \"catalog-operator-68c6474976-vq867\" (UID: \"9c8dc88a-6052-4b16-90e4-7377d8d6969d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872370 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bd92380d-9848-49d8-9feb-07b71c7729bb-tmpfs\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872390 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd92380d-9848-49d8-9feb-07b71c7729bb-webhook-cert\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872409 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f6239ed-b85f-4648-b4bf-aedce4312e26-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wn248\" (UID: \"4f6239ed-b85f-4648-b4bf-aedce4312e26\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872469 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4149c672-979a-4602-9f51-d0718d65d99a-images\") pod 
\"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872498 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d61589bf-88a6-4d18-97c8-b59460323cca-config\") pod \"service-ca-operator-777779d784-7shsd\" (UID: \"d61589bf-88a6-4d18-97c8-b59460323cca\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872532 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d7748b2b-46c9-4709-bb46-545d8209bb5f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4npq9\" (UID: \"d7748b2b-46c9-4709-bb46-545d8209bb5f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872555 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-plugins-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872572 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a53c972e-9ea8-4b23-b43b-e432f037faec-metrics-tls\") pod \"dns-default-sr2d4\" (UID: \"a53c972e-9ea8-4b23-b43b-e432f037faec\") " pod="openshift-dns/dns-default-sr2d4" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872596 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/85d176b6-4ccf-4790-b1c6-08831a67e03c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9znlm\" (UID: \"85d176b6-4ccf-4790-b1c6-08831a67e03c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872618 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-registry-certificates\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872666 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72c587e8-bc91-4e8f-a545-e01d47139d1d-secret-volume\") pod \"collect-profiles-29399520-x8l49\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872707 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7msrl\" (UniqueName: \"kubernetes.io/projected/d61589bf-88a6-4d18-97c8-b59460323cca-kube-api-access-7msrl\") pod \"service-ca-operator-777779d784-7shsd\" (UID: \"d61589bf-88a6-4d18-97c8-b59460323cca\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872728 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/843d6937-3e7e-4d0f-b8a7-163d2cf658eb-cert\") pod \"ingress-canary-w92ph\" (UID: \"843d6937-3e7e-4d0f-b8a7-163d2cf658eb\") " pod="openshift-ingress-canary/ingress-canary-w92ph" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.872763 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84hrr\" (UniqueName: \"kubernetes.io/projected/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-kube-api-access-84hrr\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.875727 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cb2d489-7d74-446f-9173-7bc5f2ff32c4-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-7rgnp\" (UID: \"6cb2d489-7d74-446f-9173-7bc5f2ff32c4\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.877094 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/472a00a0-2e80-44f7-8857-348d3d88ab01-signing-cabundle\") pod \"service-ca-9c57cc56f-c8zrh\" (UID: \"472a00a0-2e80-44f7-8857-348d3d88ab01\") " pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.879682 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7ff52e52-aab5-4850-9d4d-4f427689c82b-service-ca-bundle\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.879701 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/7ff52e52-aab5-4850-9d4d-4f427689c82b-stats-auth\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.880723 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lsqrb\" (UID: \"a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.882040 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bd92380d-9848-49d8-9feb-07b71c7729bb-tmpfs\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.882437 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/cde6026c-736d-47f2-ab64-deb47de62820-installation-pull-secrets\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.882761 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/5ccb619a-2f5c-4b42-9dbc-00479b290b3a-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-tqr2v\" (UID: \"5ccb619a-2f5c-4b42-9dbc-00479b290b3a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.883175 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cde6026c-736d-47f2-ab64-deb47de62820-ca-trust-extracted\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.883385 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f6239ed-b85f-4648-b4bf-aedce4312e26-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wn248\" (UID: \"4f6239ed-b85f-4648-b4bf-aedce4312e26\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.884145 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4149c672-979a-4602-9f51-d0718d65d99a-auth-proxy-config\") pod \"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.885667 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d7748b2b-46c9-4709-bb46-545d8209bb5f-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4npq9\" (UID: \"d7748b2b-46c9-4709-bb46-545d8209bb5f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.886961 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85d176b6-4ccf-4790-b1c6-08831a67e03c-config\") pod \"kube-controller-manager-operator-78b949d7b-9znlm\" (UID: \"85d176b6-4ccf-4790-b1c6-08831a67e03c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.888242 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-registry-certificates\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.889721 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: 
\"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-plugins-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.889798 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-csi-data-dir\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.892063 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f6239ed-b85f-4648-b4bf-aedce4312e26-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wn248\" (UID: \"4f6239ed-b85f-4648-b4bf-aedce4312e26\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.892343 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/472a00a0-2e80-44f7-8857-348d3d88ab01-signing-key\") pod \"service-ca-9c57cc56f-c8zrh\" (UID: \"472a00a0-2e80-44f7-8857-348d3d88ab01\") " pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.892556 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d61589bf-88a6-4d18-97c8-b59460323cca-config\") pod \"service-ca-operator-777779d784-7shsd\" (UID: \"d61589bf-88a6-4d18-97c8-b59460323cca\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.910159 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd92380d-9848-49d8-9feb-07b71c7729bb-apiservice-cert\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.910378 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-registry-tls\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.911541 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d61589bf-88a6-4d18-97c8-b59460323cca-serving-cert\") pod \"service-ca-operator-777779d784-7shsd\" (UID: \"d61589bf-88a6-4d18-97c8-b59460323cca\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.911725 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85d176b6-4ccf-4790-b1c6-08831a67e03c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9znlm\" (UID: \"85d176b6-4ccf-4790-b1c6-08831a67e03c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.911744 4691 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/7ff52e52-aab5-4850-9d4d-4f427689c82b-default-certificate\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.912313 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4149c672-979a-4602-9f51-d0718d65d99a-proxy-tls\") pod \"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.912879 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9c8dc88a-6052-4b16-90e4-7377d8d6969d-profile-collector-cert\") pod \"catalog-operator-68c6474976-vq867\" (UID: \"9c8dc88a-6052-4b16-90e4-7377d8d6969d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.913864 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6cb2d489-7d74-446f-9173-7bc5f2ff32c4-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-7rgnp\" (UID: \"6cb2d489-7d74-446f-9173-7bc5f2ff32c4\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.914211 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fc1223e-8b42-43c4-9862-41c6026db6de-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rnx8j\" (UID: \"2fc1223e-8b42-43c4-9862-41c6026db6de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.914397 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c592727d-7307-468a-843f-01717f868d4e-proxy-tls\") pod \"machine-config-controller-84d6567774-wrdd6\" (UID: \"c592727d-7307-468a-843f-01717f868d4e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.914856 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.917052 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4149c672-979a-4602-9f51-d0718d65d99a-images\") pod \"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.918242 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7ff52e52-aab5-4850-9d4d-4f427689c82b-metrics-certs\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.918430 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9c8dc88a-6052-4b16-90e4-7377d8d6969d-srv-cert\") pod \"catalog-operator-68c6474976-vq867\" (UID: \"9c8dc88a-6052-4b16-90e4-7377d8d6969d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.929540 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.930432 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2fc1223e-8b42-43c4-9862-41c6026db6de-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rnx8j\" (UID: \"2fc1223e-8b42-43c4-9862-41c6026db6de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.934926 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-gs8l7"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.935164 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd92380d-9848-49d8-9feb-07b71c7729bb-webhook-cert\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.937841 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjlc8\" (UniqueName: \"kubernetes.io/projected/7ff52e52-aab5-4850-9d4d-4f427689c82b-kube-api-access-pjlc8\") pod \"router-default-5444994796-njfp2\" (UID: \"7ff52e52-aab5-4850-9d4d-4f427689c82b\") " pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.941879 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcvjk\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-kube-api-access-dcvjk\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.947208 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-h9wgf"] Nov 24 
08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.955978 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f6239ed-b85f-4648-b4bf-aedce4312e26-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wn248\" (UID: \"4f6239ed-b85f-4648-b4bf-aedce4312e26\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.960640 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dvd2\" (UniqueName: \"kubernetes.io/projected/2fc1223e-8b42-43c4-9862-41c6026db6de-kube-api-access-5dvd2\") pod \"kube-storage-version-migrator-operator-b67b599dd-rnx8j\" (UID: \"2fc1223e-8b42-43c4-9862-41c6026db6de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.961999 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ljnvj"] Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978492 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978528 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75xfj\" (UniqueName: \"kubernetes.io/projected/843d6937-3e7e-4d0f-b8a7-163d2cf658eb-kube-api-access-75xfj\") pod \"ingress-canary-w92ph\" (UID: \"843d6937-3e7e-4d0f-b8a7-163d2cf658eb\") " pod="openshift-ingress-canary/ingress-canary-w92ph" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978579 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcw7x\" (UniqueName: \"kubernetes.io/projected/d448c94d-0fa0-42d9-ac9a-eccf42ecc43a-kube-api-access-vcw7x\") pod \"machine-config-server-wgf2w\" (UID: \"d448c94d-0fa0-42d9-ac9a-eccf42ecc43a\") " pod="openshift-machine-config-operator/machine-config-server-wgf2w" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978605 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72c587e8-bc91-4e8f-a545-e01d47139d1d-config-volume\") pod \"collect-profiles-29399520-x8l49\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978619 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d448c94d-0fa0-42d9-ac9a-eccf42ecc43a-node-bootstrap-token\") pod \"machine-config-server-wgf2w\" (UID: \"d448c94d-0fa0-42d9-ac9a-eccf42ecc43a\") " pod="openshift-machine-config-operator/machine-config-server-wgf2w" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978634 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a53c972e-9ea8-4b23-b43b-e432f037faec-config-volume\") pod \"dns-default-sr2d4\" (UID: 
\"a53c972e-9ea8-4b23-b43b-e432f037faec\") " pod="openshift-dns/dns-default-sr2d4" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978680 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a53c972e-9ea8-4b23-b43b-e432f037faec-metrics-tls\") pod \"dns-default-sr2d4\" (UID: \"a53c972e-9ea8-4b23-b43b-e432f037faec\") " pod="openshift-dns/dns-default-sr2d4" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978709 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72c587e8-bc91-4e8f-a545-e01d47139d1d-secret-volume\") pod \"collect-profiles-29399520-x8l49\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978730 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/843d6937-3e7e-4d0f-b8a7-163d2cf658eb-cert\") pod \"ingress-canary-w92ph\" (UID: \"843d6937-3e7e-4d0f-b8a7-163d2cf658eb\") " pod="openshift-ingress-canary/ingress-canary-w92ph" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978754 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skgjb\" (UniqueName: \"kubernetes.io/projected/72c587e8-bc91-4e8f-a545-e01d47139d1d-kube-api-access-skgjb\") pod \"collect-profiles-29399520-x8l49\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978794 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k84z8\" (UniqueName: \"kubernetes.io/projected/a53c972e-9ea8-4b23-b43b-e432f037faec-kube-api-access-k84z8\") pod \"dns-default-sr2d4\" (UID: \"a53c972e-9ea8-4b23-b43b-e432f037faec\") " pod="openshift-dns/dns-default-sr2d4" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.978852 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d448c94d-0fa0-42d9-ac9a-eccf42ecc43a-certs\") pod \"machine-config-server-wgf2w\" (UID: \"d448c94d-0fa0-42d9-ac9a-eccf42ecc43a\") " pod="openshift-machine-config-operator/machine-config-server-wgf2w" Nov 24 08:00:00 crc kubenswrapper[4691]: E1124 08:00:00.980670 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:01.480654267 +0000 UTC m=+163.479603516 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.980723 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72c587e8-bc91-4e8f-a545-e01d47139d1d-config-volume\") pod \"collect-profiles-29399520-x8l49\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.981116 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a53c972e-9ea8-4b23-b43b-e432f037faec-config-volume\") pod \"dns-default-sr2d4\" (UID: \"a53c972e-9ea8-4b23-b43b-e432f037faec\") " pod="openshift-dns/dns-default-sr2d4" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.983260 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a53c972e-9ea8-4b23-b43b-e432f037faec-metrics-tls\") pod \"dns-default-sr2d4\" (UID: \"a53c972e-9ea8-4b23-b43b-e432f037faec\") " pod="openshift-dns/dns-default-sr2d4" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.984072 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d448c94d-0fa0-42d9-ac9a-eccf42ecc43a-node-bootstrap-token\") pod \"machine-config-server-wgf2w\" (UID: \"d448c94d-0fa0-42d9-ac9a-eccf42ecc43a\") " pod="openshift-machine-config-operator/machine-config-server-wgf2w" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.986101 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d448c94d-0fa0-42d9-ac9a-eccf42ecc43a-certs\") pod \"machine-config-server-wgf2w\" (UID: \"d448c94d-0fa0-42d9-ac9a-eccf42ecc43a\") " pod="openshift-machine-config-operator/machine-config-server-wgf2w" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.987009 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72c587e8-bc91-4e8f-a545-e01d47139d1d-secret-volume\") pod \"collect-profiles-29399520-x8l49\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" Nov 24 08:00:00 crc kubenswrapper[4691]: I1124 08:00:00.990691 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n44t6\" (UniqueName: \"kubernetes.io/projected/c592727d-7307-468a-843f-01717f868d4e-kube-api-access-n44t6\") pod \"machine-config-controller-84d6567774-wrdd6\" (UID: \"c592727d-7307-468a-843f-01717f868d4e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.007926 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/843d6937-3e7e-4d0f-b8a7-163d2cf658eb-cert\") pod \"ingress-canary-w92ph\" (UID: \"843d6937-3e7e-4d0f-b8a7-163d2cf658eb\") " 
pod="openshift-ingress-canary/ingress-canary-w92ph"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.009354 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7cng4"]
Nov 24 08:00:01 crc kubenswrapper[4691]: W1124 08:00:01.014035 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd611a7c5_68d8_4ea5_88b7_d3fad9baef65.slice/crio-cc6128be54dc3fe4ece5ffe50125204ac63d63c79095b536dab42d4add999264 WatchSource:0}: Error finding container cc6128be54dc3fe4ece5ffe50125204ac63d63c79095b536dab42d4add999264: Status 404 returned error can't find the container with id cc6128be54dc3fe4ece5ffe50125204ac63d63c79095b536dab42d4add999264
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.014615 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8fgb\" (UniqueName: \"kubernetes.io/projected/6cb2d489-7d74-446f-9173-7bc5f2ff32c4-kube-api-access-x8fgb\") pod \"openshift-controller-manager-operator-756b6f6bc6-7rgnp\" (UID: \"6cb2d489-7d74-446f-9173-7bc5f2ff32c4\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.024807 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr2cb\" (UniqueName: \"kubernetes.io/projected/bd92380d-9848-49d8-9feb-07b71c7729bb-kube-api-access-xr2cb\") pod \"packageserver-d55dfcdfc-q96bx\" (UID: \"bd92380d-9848-49d8-9feb-07b71c7729bb\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.047105 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k88kb\" (UniqueName: \"kubernetes.io/projected/472a00a0-2e80-44f7-8857-348d3d88ab01-kube-api-access-k88kb\") pod \"service-ca-9c57cc56f-c8zrh\" (UID: \"472a00a0-2e80-44f7-8857-348d3d88ab01\") " pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.064314 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbr2p\" (UniqueName: \"kubernetes.io/projected/d7748b2b-46c9-4709-bb46-545d8209bb5f-kube-api-access-lbr2p\") pod \"control-plane-machine-set-operator-78cbb6b69f-4npq9\" (UID: \"d7748b2b-46c9-4709-bb46-545d8209bb5f\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.083167 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.083797 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:01.583774667 +0000 UTC m=+163.582723916 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.090392 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-6b2ss"]
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.094076 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nn5m\" (UniqueName: \"kubernetes.io/projected/4149c672-979a-4602-9f51-d0718d65d99a-kube-api-access-2nn5m\") pod \"machine-config-operator-74547568cd-744kk\" (UID: \"4149c672-979a-4602-9f51-d0718d65d99a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.130485 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-njfp2"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.136688 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84hrr\" (UniqueName: \"kubernetes.io/projected/f1001a2a-f7b8-46cc-b8e8-852fffb997e5-kube-api-access-84hrr\") pod \"csi-hostpathplugin-22hbl\" (UID: \"f1001a2a-f7b8-46cc-b8e8-852fffb997e5\") " pod="hostpath-provisioner/csi-hostpathplugin-22hbl"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.148359 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6l9rd\" (UniqueName: \"kubernetes.io/projected/a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45-kube-api-access-6l9rd\") pod \"multus-admission-controller-857f4d67dd-lsqrb\" (UID: \"a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.169973 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.173613 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-bound-sa-token\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.178040 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.184585 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.186815 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.191544 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:01.691519862 +0000 UTC m=+163.690469111 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.199219 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-659hv\" (UniqueName: \"kubernetes.io/projected/5ccb619a-2f5c-4b42-9dbc-00479b290b3a-kube-api-access-659hv\") pod \"package-server-manager-789f6589d5-tqr2v\" (UID: \"5ccb619a-2f5c-4b42-9dbc-00479b290b3a\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.204411 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zw9jm"]
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.208425 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.210864 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7msrl\" (UniqueName: \"kubernetes.io/projected/d61589bf-88a6-4d18-97c8-b59460323cca-kube-api-access-7msrl\") pod \"service-ca-operator-777779d784-7shsd\" (UID: \"d61589bf-88a6-4d18-97c8-b59460323cca\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.219013 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.227096 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/85d176b6-4ccf-4790-b1c6-08831a67e03c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9znlm\" (UID: \"85d176b6-4ccf-4790-b1c6-08831a67e03c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.228151 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.239510 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.242602 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.243408 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfrh7\" (UniqueName: \"kubernetes.io/projected/9c8dc88a-6052-4b16-90e4-7377d8d6969d-kube-api-access-bfrh7\") pod \"catalog-operator-68c6474976-vq867\" (UID: \"9c8dc88a-6052-4b16-90e4-7377d8d6969d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.243671 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27"]
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.252016 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.260336 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.262995 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcw7x\" (UniqueName: \"kubernetes.io/projected/d448c94d-0fa0-42d9-ac9a-eccf42ecc43a-kube-api-access-vcw7x\") pod \"machine-config-server-wgf2w\" (UID: \"d448c94d-0fa0-42d9-ac9a-eccf42ecc43a\") " pod="openshift-machine-config-operator/machine-config-server-wgf2w"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.283990 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75xfj\" (UniqueName: \"kubernetes.io/projected/843d6937-3e7e-4d0f-b8a7-163d2cf658eb-kube-api-access-75xfj\") pod \"ingress-canary-w92ph\" (UID: \"843d6937-3e7e-4d0f-b8a7-163d2cf658eb\") " pod="openshift-ingress-canary/ingress-canary-w92ph"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.286769 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6n9v7"]
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.288423 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-22hbl"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.288514 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.288864 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:01.788844004 +0000 UTC m=+163.787793253 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.297553 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-wgf2w"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.305438 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skgjb\" (UniqueName: \"kubernetes.io/projected/72c587e8-bc91-4e8f-a545-e01d47139d1d-kube-api-access-skgjb\") pod \"collect-profiles-29399520-x8l49\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.317373 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p2br"]
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.325761 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-w92ph"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.329553 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k84z8\" (UniqueName: \"kubernetes.io/projected/a53c972e-9ea8-4b23-b43b-e432f037faec-kube-api-access-k84z8\") pod \"dns-default-sr2d4\" (UID: \"a53c972e-9ea8-4b23-b43b-e432f037faec\") " pod="openshift-dns/dns-default-sr2d4"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.354383 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.391342 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.391742 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:01.891728827 +0000 UTC m=+163.890678076 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.398239 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-m8bj7"]
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.402265 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp"]
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.405625 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs"]
Nov 24 08:00:01 crc kubenswrapper[4691]: W1124 08:00:01.447545 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e3edb7c_7fcb_4525_975e_227838552f54.slice/crio-a855254f9c4d469e190fb2f0987f018082ed7dbd3e29d3540006268d571e31b6 WatchSource:0}: Error finding container a855254f9c4d469e190fb2f0987f018082ed7dbd3e29d3540006268d571e31b6: Status 404 returned error can't find the container with id a855254f9c4d469e190fb2f0987f018082ed7dbd3e29d3540006268d571e31b6
Nov 24 08:00:01 crc kubenswrapper[4691]: W1124 08:00:01.467848 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd448c94d_0fa0_42d9_ac9a_eccf42ecc43a.slice/crio-38278acd4a9a6b342e8fd92f1dc489a92395c2317526f7112ca608a262645058 WatchSource:0}: Error finding container 38278acd4a9a6b342e8fd92f1dc489a92395c2317526f7112ca608a262645058: Status 404 returned error can't find the container with id 38278acd4a9a6b342e8fd92f1dc489a92395c2317526f7112ca608a262645058
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.493314 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.494060 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.494493 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:01.994478586 +0000 UTC m=+163.993427835 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.500325 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.522889 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.556663 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp"]
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.596100 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.596660 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.096640229 +0000 UTC m=+164.095589478 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.618439 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-sr2d4"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.659337 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-744kk"]
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.674825 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zw9jm" event={"ID":"7dd994cd-9276-4e56-aa73-db6007804b05","Type":"ContainerStarted","Data":"ed46f8339cb73a6e09b30743a3a6d734f65fb2633b5bd3e1e645c38a40bb2080"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.675911 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp" event={"ID":"4e3edb7c-7fcb-4525-975e-227838552f54","Type":"ContainerStarted","Data":"a855254f9c4d469e190fb2f0987f018082ed7dbd3e29d3540006268d571e31b6"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.700623 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.700848 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.20082716 +0000 UTC m=+164.199776409 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.701371 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.701752 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.201733907 +0000 UTC m=+164.200683156 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.727321 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" event={"ID":"dd11e81f-c100-44a8-bc17-2ae2c0b2788d","Type":"ContainerStarted","Data":"a46f7fc7ddfc558defdd6df8010bdc98d624b6717fafe34b6693a7eefbbc6552"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.727368 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" event={"ID":"dd11e81f-c100-44a8-bc17-2ae2c0b2788d","Type":"ContainerStarted","Data":"73139684ca432dff6e6e035ba845450047443e81ef687423d18bae077f80720f"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.728229 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.740011 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" event={"ID":"12724cb5-e0ed-4c92-93e6-0f223dd11bea","Type":"ContainerStarted","Data":"3219cd290cff0860aabf9bfe31b07e0d2feb65226fdbfa15dd36b04addc87e9e"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.740205 4691 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-h2rcj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused" start-of-body=
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.740287 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" podUID="dd11e81f-c100-44a8-bc17-2ae2c0b2788d" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.20:6443/healthz\": dial tcp 10.217.0.20:6443: connect: connection refused"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.751623 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" event={"ID":"4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96","Type":"ContainerStarted","Data":"82f889200891b895c36622d8ee6de22ad06362bb93131b7aa0ac9c42373d2f34"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.751686 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" event={"ID":"4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96","Type":"ContainerStarted","Data":"fd54bc5b2daf961bfb168f91c7e6c6b94ef97b22560c26d0fa85d0bda9e29268"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.765885 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" event={"ID":"2d405da4-b29b-4e37-8871-a7db10fdc8d5","Type":"ContainerStarted","Data":"169eb0f7e944bcd258b5139bf4b603f6fd392597d997586e9afcbd2bea014d6d"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.765940 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" event={"ID":"2d405da4-b29b-4e37-8871-a7db10fdc8d5","Type":"ContainerStarted","Data":"f35f0425622f11975852c8a48c8d88d46a2fcdaf8bc4466e660fb47cf2f596a8"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.767544 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-wgf2w" event={"ID":"d448c94d-0fa0-42d9-ac9a-eccf42ecc43a","Type":"ContainerStarted","Data":"38278acd4a9a6b342e8fd92f1dc489a92395c2317526f7112ca608a262645058"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.773823 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h9wgf" event={"ID":"d611a7c5-68d8-4ea5-88b7-d3fad9baef65","Type":"ContainerStarted","Data":"caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.773879 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h9wgf" event={"ID":"d611a7c5-68d8-4ea5-88b7-d3fad9baef65","Type":"ContainerStarted","Data":"cc6128be54dc3fe4ece5ffe50125204ac63d63c79095b536dab42d4add999264"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.802345 4691 generic.go:334] "Generic (PLEG): container finished" podID="278ddf9e-14a8-43dd-820a-bfda668bbce1" containerID="161c6a16fbf867cc39edc4ed9bccb4d744eea3e138a4c71eda914634feda2742" exitCode=0
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.802757 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" event={"ID":"278ddf9e-14a8-43dd-820a-bfda668bbce1","Type":"ContainerDied","Data":"161c6a16fbf867cc39edc4ed9bccb4d744eea3e138a4c71eda914634feda2742"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.802791 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" event={"ID":"278ddf9e-14a8-43dd-820a-bfda668bbce1","Type":"ContainerStarted","Data":"81e5603835e30a3bbbfd5edc0b2646728c215393dfb080227a321090c4fc55e4"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.803025 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.803330 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.303307741 +0000 UTC m=+164.302256990 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.803443 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.804318 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.30430611 +0000 UTC m=+164.303255359 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.805601 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" event={"ID":"53aa0d7f-022e-46a2-9e47-442eca753bbc","Type":"ContainerStarted","Data":"f9e05d2cc98059d0803b3f85d142fe15e42b3f82dbe59085c4ee4c3628e5779c"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.813334 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" event={"ID":"a556a2eb-6c10-4dd1-ad6e-5cc4084a4497","Type":"ContainerStarted","Data":"0c405912cfe40a7f4755b2ee6434c992788281c4edfca3becd058e00371879ed"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.818878 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" event={"ID":"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b","Type":"ContainerStarted","Data":"d6eb8d770471739be15c94aeb703659e97c705c40264f47df73ead439796cfe6"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.818941 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" event={"ID":"a1312328-61e0-4c2a-9cf1-dd3c72ebbf4b","Type":"ContainerStarted","Data":"fbfa5c30969574bf2c6b393bde00b758874d26cbe7c5145f7464f77cbbd66977"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.826028 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" event={"ID":"6d32b123-7986-4bd2-abdf-b8be8c855817","Type":"ContainerStarted","Data":"cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.827469 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx"]
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.828060 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.839688 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" event={"ID":"c182330a-7ed5-4b31-8d80-d348e821c749","Type":"ContainerStarted","Data":"e149b90b858c7f9fa51e9096b4538ca83a36dbf3243284585c35d3bc3c7a2cd8"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.839748 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" event={"ID":"c182330a-7ed5-4b31-8d80-d348e821c749","Type":"ContainerStarted","Data":"33ef948d61da8be286f64e75e69f01a0fa26b3dc0dc8836c6d43b90a280f9827"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.845840 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7cng4" event={"ID":"1b8890e7-15c8-4467-b31b-493b565c584a","Type":"ContainerStarted","Data":"b60efb1c85030efa34f0d9932363338a7cb9cd801a64f040b7fd59be3c935aa8"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.849286 4691 generic.go:334] "Generic (PLEG): container finished" podID="0577b42c-04f7-4ebe-afda-6b968475f302" containerID="f051fdf256b3406bfc9ead13728459406def51e7ef82499db6cff327b846397e" exitCode=0
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.849477 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" event={"ID":"0577b42c-04f7-4ebe-afda-6b968475f302","Type":"ContainerDied","Data":"f051fdf256b3406bfc9ead13728459406def51e7ef82499db6cff327b846397e"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.856263 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" event={"ID":"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe","Type":"ContainerStarted","Data":"d2c79f0edf5f95510768002fff92d12096125a6b799ed6eac9beae1f5873900b"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.857419 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-njfp2" event={"ID":"7ff52e52-aab5-4850-9d4d-4f427689c82b","Type":"ContainerStarted","Data":"0940df57d8d5b8438d53afc2c8fc55e7e6ac574cac833552bc258a1bad887949"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.868478 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" event={"ID":"a0e11694-91c8-4c71-88a5-f78c1285acb6","Type":"ContainerStarted","Data":"37a29b6ec9659e5dd5ec734f2ec9ef4959eafdd2f229215afe7423749e214368"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.871906 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" event={"ID":"35aab505-1a5e-463c-a9af-ea76e2b866de","Type":"ContainerStarted","Data":"a8ec1e756c27d2cd6cc8db468f8d08d748e3f6515d83ab1fdd16300a3421a8bc"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.872071 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.873546 4691 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-5m226 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body=
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.873603 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" podUID="35aab505-1a5e-463c-a9af-ea76e2b866de" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.874490 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" event={"ID":"4db5010a-367e-473a-8e9e-56febfd76781","Type":"ContainerStarted","Data":"0a85cca4470c934074f5b0c799a8180293e5597afef51831196f2eef3ee8e1a7"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.879370 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" event={"ID":"f29fbcd4-390b-4c16-8d35-331bf36b835a","Type":"ContainerStarted","Data":"8c6d45365e32c2052b6fa58290eaede5f47c1cc1fe49bae81c36e77b710a1d8b"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.885178 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" event={"ID":"0935333c-8a58-4595-806a-d765d455f44c","Type":"ContainerStarted","Data":"1336e1d3520dcde2c7e8a55dddddf81e23e629951df50ad08b85312656941684"}
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.885362 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399505-btzlm"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.886273 4691 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-cbkxk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.886322 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" podUID="28d1a6e7-60cf-4233-9298-4a561b105271" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused"
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.906725 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.906734 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.406704389 +0000 UTC m=+164.405653638 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.907513 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:01 crc kubenswrapper[4691]: E1124 08:00:01.909588 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.409572133 +0000 UTC m=+164.408521442 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:01 crc kubenswrapper[4691]: I1124 08:00:01.932708 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.009491 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.009602 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.509582942 +0000 UTC m=+164.508532191 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.009805 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.010218 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.51020792 +0000 UTC m=+164.509157169 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.107546 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-ljnvj" podStartSLOduration=141.07376735 podStartE2EDuration="2m21.07376735s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:02.067641522 +0000 UTC m=+164.066590791" watchObservedRunningTime="2025-11-24 08:00:02.07376735 +0000 UTC m=+164.072716599"
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.128575 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.128957 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.628926755 +0000 UTC m=+164.627876004 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.129393 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.130092 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.630077738 +0000 UTC m=+164.629026987 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.152889 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" podStartSLOduration=140.152865011 podStartE2EDuration="2m20.152865011s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:02.139617066 +0000 UTC m=+164.138566325" watchObservedRunningTime="2025-11-24 08:00:02.152865011 +0000 UTC m=+164.151814270"
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.154481 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.165098 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj"
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.195283 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.230152 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.239130 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.7390829 +0000 UTC m=+164.738032149 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.242482 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.243040 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.743017754 +0000 UTC m=+164.741967003 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.277372 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-w92ph"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.306065 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.343177 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.343519 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.843503678 +0000 UTC m=+164.842452927 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.363003 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-h9wgf" podStartSLOduration=141.362965574 podStartE2EDuration="2m21.362965574s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:02.357743892 +0000 UTC m=+164.356693141" watchObservedRunningTime="2025-11-24 08:00:02.362965574 +0000 UTC m=+164.361914823"
Nov 24 08:00:02 crc kubenswrapper[4691]: W1124 08:00:02.375472 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc592727d_7307_468a_843f_01717f868d4e.slice/crio-ffbc29439b74e4c593ad93ad595bb1e6a65131c3d98f27cd5378644a8208d400 WatchSource:0}: Error finding container ffbc29439b74e4c593ad93ad595bb1e6a65131c3d98f27cd5378644a8208d400: Status 404 returned error can't find the container with id ffbc29439b74e4c593ad93ad595bb1e6a65131c3d98f27cd5378644a8208d400
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.402435 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lsqrb"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.443025 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-c8zrh"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.444674 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.445140 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:02.945123554 +0000 UTC m=+164.944072803 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.542727 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-7shsd"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.542916 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5vnfr" podStartSLOduration=141.542895479 podStartE2EDuration="2m21.542895479s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:02.535615047 +0000 UTC m=+164.534564296" watchObservedRunningTime="2025-11-24 08:00:02.542895479 +0000 UTC m=+164.541844728"
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.547023 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.547583 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.047563255 +0000 UTC m=+165.046512504 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.572207 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-22hbl"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.628022 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" podStartSLOduration=140.627997955 podStartE2EDuration="2m20.627997955s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:02.625653507 +0000 UTC m=+164.624602786" watchObservedRunningTime="2025-11-24 08:00:02.627997955 +0000 UTC m=+164.626947204"
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.648298 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.648750 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.148729878 +0000 UTC m=+165.147679197 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.728379 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.732138 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" podStartSLOduration=140.732114194 podStartE2EDuration="2m20.732114194s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:02.724282586 +0000 UTC m=+164.723231835" watchObservedRunningTime="2025-11-24 08:00:02.732114194 +0000 UTC m=+164.731063443"
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.753479 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.753706 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.253657101 +0000 UTC m=+165.252606350 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.754025 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.754566 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.254558227 +0000 UTC m=+165.253507476 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.788879 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.866778 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.867621 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.367602786 +0000 UTC m=+165.366552035 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.867980 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" podStartSLOduration=141.867969037 podStartE2EDuration="2m21.867969037s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:02.865947178 +0000 UTC m=+164.864896437" watchObservedRunningTime="2025-11-24 08:00:02.867969037 +0000 UTC m=+164.866918286"
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.926432 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867"]
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.944312 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-w92ph" event={"ID":"843d6937-3e7e-4d0f-b8a7-163d2cf658eb","Type":"ContainerStarted","Data":"2258b75f055852970ea3d5fd19f68c87aee2ae1259e0f9ff0f40a6d164309811"}
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.957763 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" event={"ID":"a556a2eb-6c10-4dd1-ad6e-5cc4084a4497","Type":"ContainerStarted","Data":"7a7379d9f2ec3783425bda5f8f63c122e1828e27cbd80c468d99e04d587d6f19"}
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.963829 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" event={"ID":"4f6239ed-b85f-4648-b4bf-aedce4312e26","Type":"ContainerStarted","Data":"227ec5525b778aab6d554088871496367a0a8313bfc3354c36cbeda663750c82"}
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.971459 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:02 crc kubenswrapper[4691]: E1124 08:00:02.971883 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.47187002 +0000 UTC m=+165.470819269 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.987077 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zw9jm" event={"ID":"7dd994cd-9276-4e56-aa73-db6007804b05","Type":"ContainerStarted","Data":"581f6e058e5d301ce0efd6898c34e7ceb061318f23796a98d58fdf2c0a0cfcd1"}
Nov 24 08:00:02 crc kubenswrapper[4691]: I1124 08:00:02.988290 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-zw9jm"
Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.002743 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb" event={"ID":"a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45","Type":"ContainerStarted","Data":"369d423fb0fbb9e97bd3728136f416ae20a37393947d5be94d6da14fb6abb6b9"}
Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.011836 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" event={"ID":"d61589bf-88a6-4d18-97c8-b59460323cca","Type":"ContainerStarted","Data":"8445b15adb774a98317b3919c5793823852051d99271d3d22ceb34da8819045f"}
Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.015224 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-22hbl" event={"ID":"f1001a2a-f7b8-46cc-b8e8-852fffb997e5","Type":"ContainerStarted","Data":"d996676d3d1344e64845b1d293153844481f3c8a7a23752c71fb50927aa81374"}
Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.021346 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" event={"ID":"472a00a0-2e80-44f7-8857-348d3d88ab01","Type":"ContainerStarted","Data":"b5ffd8fdde97a3e820da9d1f0da0f7aefae95f4f492529807778bf0f3a359b1e"}
Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.021886 4691 patch_prober.go:28] interesting pod/console-operator-58897d9998-zw9jm container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get
\"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.021940 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zw9jm" podUID="7dd994cd-9276-4e56-aa73-db6007804b05" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.027421 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" event={"ID":"c182330a-7ed5-4b31-8d80-d348e821c749","Type":"ContainerStarted","Data":"17be6919f04b3de7615310f20b9b62ac8c627b47300ff47b48ac45ef1081d0de"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.039605 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" event={"ID":"6cb2d489-7d74-446f-9173-7bc5f2ff32c4","Type":"ContainerStarted","Data":"f1eb4b707cda475d8c297876d5b46304a981415a43e633c09a80ee1c438ceb23"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.044339 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-njfp2" event={"ID":"7ff52e52-aab5-4850-9d4d-4f427689c82b","Type":"ContainerStarted","Data":"050b7a02188a47c29a5b3e80cd801090a2fe9ba5420ee3fdcb88d50e667aa79b"} Nov 24 08:00:03 crc kubenswrapper[4691]: W1124 08:00:03.046167 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72c587e8_bc91_4e8f_a545_e01d47139d1d.slice/crio-23fa03fc0bec2c3eb621b43324cc677243d819e14834ab45f1c17cf9539df9df WatchSource:0}: Error finding container 23fa03fc0bec2c3eb621b43324cc677243d819e14834ab45f1c17cf9539df9df: Status 404 returned error can't find the container with id 23fa03fc0bec2c3eb621b43324cc677243d819e14834ab45f1c17cf9539df9df Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.046395 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7cng4" event={"ID":"1b8890e7-15c8-4467-b31b-493b565c584a","Type":"ContainerStarted","Data":"c0be7e04985fd735591b53b6a109c8dfcf26f8e9acb7bddb3f92aa06f7c9dae6"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.047372 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7cng4" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.049704 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" event={"ID":"bd92380d-9848-49d8-9feb-07b71c7729bb","Type":"ContainerStarted","Data":"fd8659af27eb13047ffa9d940bb72ecde47040199f5c708ee6b42a5dc3366f47"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.050094 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.053135 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" event={"ID":"0935333c-8a58-4595-806a-d765d455f44c","Type":"ContainerStarted","Data":"91b16c2f302184921984389de8036898a3104d29bf2d71246aad201878f3369b"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.063496 4691 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-wgf2w" event={"ID":"d448c94d-0fa0-42d9-ac9a-eccf42ecc43a","Type":"ContainerStarted","Data":"7624d4584b79e6383e6592992c62691a43288c32da53cc3efac4fb9c74cb6eaf"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.063505 4691 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-q96bx container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused" start-of-body= Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.063556 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.063580 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" podUID="bd92380d-9848-49d8-9feb-07b71c7729bb" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.063607 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.072106 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:03 crc kubenswrapper[4691]: E1124 08:00:03.080175 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.58014141 +0000 UTC m=+165.579090659 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.087135 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" event={"ID":"f29fbcd4-390b-4c16-8d35-331bf36b835a","Type":"ContainerStarted","Data":"795002b13c2d9483e828925df9705667352f62f2e4695974def48fccf7cb5917"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.098211 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-sr2d4"] Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.109925 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" event={"ID":"c592727d-7307-468a-843f-01717f868d4e","Type":"ContainerStarted","Data":"ffbc29439b74e4c593ad93ad595bb1e6a65131c3d98f27cd5378644a8208d400"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.121703 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" event={"ID":"2fc1223e-8b42-43c4-9862-41c6026db6de","Type":"ContainerStarted","Data":"9872469a9880f364d0e360126e453f1c7d756a8fe88ce8c7e95dba7242a723a9"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.133293 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.134647 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.134913 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.140748 4691 generic.go:334] "Generic (PLEG): container finished" podID="4db5010a-367e-473a-8e9e-56febfd76781" containerID="1c7ba20266e491e41530a696899e485ce14aec369d8e6d3cb9eace689f7a544e" exitCode=0 Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.141545 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" event={"ID":"4db5010a-367e-473a-8e9e-56febfd76781","Type":"ContainerDied","Data":"1c7ba20266e491e41530a696899e485ce14aec369d8e6d3cb9eace689f7a544e"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.158901 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" event={"ID":"4149c672-979a-4602-9f51-d0718d65d99a","Type":"ContainerStarted","Data":"b28bd0d35a7c81d9207c28a84244d6db4c71b7a7eb9e8ab74c14d946abe9bbbe"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 
08:00:03.165415 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399505-btzlm"] Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.167559 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9" event={"ID":"d7748b2b-46c9-4709-bb46-545d8209bb5f","Type":"ContainerStarted","Data":"af803f32901a9f2ccff8a3d8ddbcd315a6e4856e6e9bfc2efc2d3b31b9ae51db"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.170743 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399505-btzlm"] Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.175961 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.179377 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm"] Nov 24 08:00:03 crc kubenswrapper[4691]: E1124 08:00:03.180833 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.680818549 +0000 UTC m=+165.679767798 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.188810 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" event={"ID":"35aab505-1a5e-463c-a9af-ea76e2b866de","Type":"ContainerStarted","Data":"dd60598fd4575cbee6bf5b9e4f9991c518114ec7d333f40e4e1c4e266f9677e8"} Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.210395 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5m226" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.217115 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-s85fr" podStartSLOduration=142.217092245 podStartE2EDuration="2m22.217092245s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:03.206694732 +0000 UTC m=+165.205643981" watchObservedRunningTime="2025-11-24 08:00:03.217092245 +0000 UTC m=+165.216041494" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.275942 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-zw9jm" 
podStartSLOduration=142.275923006 podStartE2EDuration="2m22.275923006s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:03.247360345 +0000 UTC m=+165.246309594" watchObservedRunningTime="2025-11-24 08:00:03.275923006 +0000 UTC m=+165.274872245" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.279119 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-wgf2w" podStartSLOduration=5.279110139 podStartE2EDuration="5.279110139s" podCreationTimestamp="2025-11-24 07:59:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:03.273108564 +0000 UTC m=+165.272057823" watchObservedRunningTime="2025-11-24 08:00:03.279110139 +0000 UTC m=+165.278059388" Nov 24 08:00:03 crc kubenswrapper[4691]: E1124 08:00:03.277084 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.777062759 +0000 UTC m=+165.776012008 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.277008 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.280063 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:03 crc kubenswrapper[4691]: E1124 08:00:03.286006 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.785982399 +0000 UTC m=+165.784931738 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.381889 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:03 crc kubenswrapper[4691]: E1124 08:00:03.382482 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.882430475 +0000 UTC m=+165.881379724 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.388890 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" podStartSLOduration=141.388876183 podStartE2EDuration="2m21.388876183s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:03.362649189 +0000 UTC m=+165.361598438" watchObservedRunningTime="2025-11-24 08:00:03.388876183 +0000 UTC m=+165.387825422" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.389644 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-njfp2" podStartSLOduration=142.389639625 podStartE2EDuration="2m22.389639625s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:03.388544043 +0000 UTC m=+165.387493302" watchObservedRunningTime="2025-11-24 08:00:03.389639625 +0000 UTC m=+165.388588874" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.454795 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-fc99x" podStartSLOduration=142.4547743 podStartE2EDuration="2m22.4547743s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:03.454058869 +0000 UTC m=+165.453008118" watchObservedRunningTime="2025-11-24 08:00:03.4547743 +0000 UTC m=+165.453723559" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.485172 4691 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:03 crc kubenswrapper[4691]: E1124 08:00:03.485650 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:03.985629038 +0000 UTC m=+165.984578287 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.519848 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-7cng4" podStartSLOduration=142.519823232 podStartE2EDuration="2m22.519823232s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:03.516001611 +0000 UTC m=+165.514950870" watchObservedRunningTime="2025-11-24 08:00:03.519823232 +0000 UTC m=+165.518772481" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.558366 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" podStartSLOduration=142.558332273 podStartE2EDuration="2m22.558332273s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:03.557812348 +0000 UTC m=+165.556761597" watchObservedRunningTime="2025-11-24 08:00:03.558332273 +0000 UTC m=+165.557281512" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.586423 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:03 crc kubenswrapper[4691]: E1124 08:00:03.587930 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:04.087905613 +0000 UTC m=+166.086854862 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.691756 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:03 crc kubenswrapper[4691]: E1124 08:00:03.692124 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:04.192112215 +0000 UTC m=+166.191061464 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.792975 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:03 crc kubenswrapper[4691]: E1124 08:00:03.793571 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:04.293555997 +0000 UTC m=+166.292505236 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.799206 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" Nov 24 08:00:03 crc kubenswrapper[4691]: I1124 08:00:03.894547 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:03 crc kubenswrapper[4691]: E1124 08:00:03.894961 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:04.394947047 +0000 UTC m=+166.393896296 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.000015 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.000489 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:04.500469607 +0000 UTC m=+166.499418866 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.000924 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.001246 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:04.501235989 +0000 UTC m=+166.500185248 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.102384 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.103063 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:04.603048091 +0000 UTC m=+166.601997340 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.133946 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.134030 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.196719 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" event={"ID":"5ccb619a-2f5c-4b42-9dbc-00479b290b3a","Type":"ContainerStarted","Data":"83ad8b7a53587d5efabc8b244bf4fc7651aed4b91fc0f800aff935df1a8cbb04"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.196772 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" event={"ID":"5ccb619a-2f5c-4b42-9dbc-00479b290b3a","Type":"ContainerStarted","Data":"e7e353893a6aa1a7d459010b7cfcd8d28f81bc5356740bd4169cff906d76fab1"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.199716 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" event={"ID":"0577b42c-04f7-4ebe-afda-6b968475f302","Type":"ContainerStarted","Data":"90bd7560a2e57dfc8748dcbad66b5c0d868ea245e3eab12eeebf95e55a7c2519"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.201523 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" event={"ID":"4149c672-979a-4602-9f51-d0718d65d99a","Type":"ContainerStarted","Data":"312a0477461d36aec1f21fb75ab41e04deabe44d78518a2ac74f93f8c6b30e05"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.209768 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" event={"ID":"a0e11694-91c8-4c71-88a5-f78c1285acb6","Type":"ContainerStarted","Data":"617ec85a406ff0810923d60c9281e9b77433ec70cbf54543be60d54de4e5813c"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.211039 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" event={"ID":"72c587e8-bc91-4e8f-a545-e01d47139d1d","Type":"ContainerStarted","Data":"23fa03fc0bec2c3eb621b43324cc677243d819e14834ab45f1c17cf9539df9df"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.212698 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: 
\"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.213656 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:04.713639449 +0000 UTC m=+166.712588698 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.215920 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" event={"ID":"85d176b6-4ccf-4790-b1c6-08831a67e03c","Type":"ContainerStarted","Data":"1f6eeb434d2d224d78a3725c076c607d8423ad734769904b8b6562d7689c703a"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.223219 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" event={"ID":"6cb2d489-7d74-446f-9173-7bc5f2ff32c4","Type":"ContainerStarted","Data":"2ea9d5793905f27e79064eb2faf45496f0a3f3200c3fec293561558c3572930c"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.229835 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" event={"ID":"4d74fbd6-b66d-4ad5-8bf7-06ce9451fa96","Type":"ContainerStarted","Data":"decb26455be2150bac703936d43d8c8781406299c2882fbcd78589e127840a8d"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.235350 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-6n9v7" podStartSLOduration=143.23533259 podStartE2EDuration="2m23.23533259s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:04.232843588 +0000 UTC m=+166.231792857" watchObservedRunningTime="2025-11-24 08:00:04.23533259 +0000 UTC m=+166.234281839" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.240877 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9" event={"ID":"d7748b2b-46c9-4709-bb46-545d8209bb5f","Type":"ContainerStarted","Data":"4c06a6d76c2656bd58b3a554f95f9201d1ef1ca183c77f5ff3415dadccc8abb9"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.253898 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" event={"ID":"12724cb5-e0ed-4c92-93e6-0f223dd11bea","Type":"ContainerStarted","Data":"397ff1fc216bbe0178b8966aa9f5b48b71150d1cedb0e61d8ef527049f3488d3"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.260290 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7rgnp" podStartSLOduration=143.260272776 
podStartE2EDuration="2m23.260272776s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:04.259974947 +0000 UTC m=+166.258924206" watchObservedRunningTime="2025-11-24 08:00:04.260272776 +0000 UTC m=+166.259222025" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.260539 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" event={"ID":"bd92380d-9848-49d8-9feb-07b71c7729bb","Type":"ContainerStarted","Data":"5fd65e118b97cede1267e40bc681de09bc9f6308172afb396fe7366971d5e2e2"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.263063 4691 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-q96bx container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused" start-of-body= Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.263115 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" podUID="bd92380d-9848-49d8-9feb-07b71c7729bb" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.270982 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" event={"ID":"c592727d-7307-468a-843f-01717f868d4e","Type":"ContainerStarted","Data":"88afa91ffbdb1b199fc78d837c284148ef85fbfe8b983960d5227c1b612ab8d1"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.275573 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-w92ph" event={"ID":"843d6937-3e7e-4d0f-b8a7-163d2cf658eb","Type":"ContainerStarted","Data":"27cb5d498bffea975fed9cb1ce2fc02ba36535e53a5ae8599ebe7c062023202a"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.280323 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rnx8j" event={"ID":"2fc1223e-8b42-43c4-9862-41c6026db6de","Type":"ContainerStarted","Data":"02976203946e0c66c4b42a596c8fac550589c35a466c49a1de427fecde24d341"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.282115 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" event={"ID":"9c8dc88a-6052-4b16-90e4-7377d8d6969d","Type":"ContainerStarted","Data":"f97955ed8a636a67ec1cb3080dfbff894e03ac8bc4e5a68989f7e8139daa4243"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.289219 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" event={"ID":"a556a2eb-6c10-4dd1-ad6e-5cc4084a4497","Type":"ContainerStarted","Data":"c0bd039a57b1649e6712abdd658701bef759e24c028384b5f9a16c7287b42465"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.292582 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" event={"ID":"278ddf9e-14a8-43dd-820a-bfda668bbce1","Type":"ContainerStarted","Data":"3a77a5f854f71b1cefdab8664e592f525df135bf2ed3412131e230d8011d0db8"} Nov 24 08:00:04 crc 
kubenswrapper[4691]: I1124 08:00:04.293113 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.296496 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" event={"ID":"d61589bf-88a6-4d18-97c8-b59460323cca","Type":"ContainerStarted","Data":"db875a1f16ed552bdf1c427561a4176dd0e18198f74100554c3c59c954a18154"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.297704 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4npq9" podStartSLOduration=143.297680904 podStartE2EDuration="2m23.297680904s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:04.297025725 +0000 UTC m=+166.295974984" watchObservedRunningTime="2025-11-24 08:00:04.297680904 +0000 UTC m=+166.296630153" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.298942 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qx8cv" podStartSLOduration=143.29893353 podStartE2EDuration="2m23.29893353s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:04.283255284 +0000 UTC m=+166.282204543" watchObservedRunningTime="2025-11-24 08:00:04.29893353 +0000 UTC m=+166.297882769" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.308698 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" event={"ID":"f29fbcd4-390b-4c16-8d35-331bf36b835a","Type":"ContainerStarted","Data":"4e1320c4b5e25a28fb638645c31606573e430a0faf92a5e5fb7774bd3fc56aeb"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.314999 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.316478 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:04.81643866 +0000 UTC m=+166.815387919 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.321841 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" event={"ID":"53aa0d7f-022e-46a2-9e47-442eca753bbc","Type":"ContainerStarted","Data":"4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.322159 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.329926 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" event={"ID":"b7afb5cd-ee13-48a2-9d6f-14ce967d98fe","Type":"ContainerStarted","Data":"8faaffb812f43ddeb9bc1735e76afbfa56d8520cc7accb5e8b617cc9f5f32011"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.332367 4691 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-7p2br container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.332429 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" podUID="53aa0d7f-022e-46a2-9e47-442eca753bbc" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.333939 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sr2d4" event={"ID":"a53c972e-9ea8-4b23-b43b-e432f037faec","Type":"ContainerStarted","Data":"6cb80d4fe41251d2d36e4df878879132ac2df6441bdd5dc26310281ff77bbfe6"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.336834 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp" event={"ID":"4e3edb7c-7fcb-4525-975e-227838552f54","Type":"ContainerStarted","Data":"b18834d5435b13441ea354ec3be3427d3df2d3c7a4322358858c8d70f7f2ac61"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.336873 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp" event={"ID":"4e3edb7c-7fcb-4525-975e-227838552f54","Type":"ContainerStarted","Data":"96cc5c422be8738de18bba6cef1a38ade691eda40edee8013a14636bd787e227"} Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.344977 4691 patch_prober.go:28] interesting pod/console-operator-58897d9998-zw9jm container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.345046 4691 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zw9jm" podUID="7dd994cd-9276-4e56-aa73-db6007804b05" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.347138 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-w92ph" podStartSLOduration=6.347121083 podStartE2EDuration="6.347121083s" podCreationTimestamp="2025-11-24 07:59:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:04.324238617 +0000 UTC m=+166.323187876" watchObservedRunningTime="2025-11-24 08:00:04.347121083 +0000 UTC m=+166.346070332" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.347519 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-gs8l7" podStartSLOduration=143.347512994 podStartE2EDuration="2m23.347512994s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:04.34566024 +0000 UTC m=+166.344609499" watchObservedRunningTime="2025-11-24 08:00:04.347512994 +0000 UTC m=+166.346462243" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.352260 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.352361 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.384696 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" podStartSLOduration=143.384653655 podStartE2EDuration="2m23.384653655s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:04.362776278 +0000 UTC m=+166.361725547" watchObservedRunningTime="2025-11-24 08:00:04.384653655 +0000 UTC m=+166.383602904" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.387137 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-mdm27" podStartSLOduration=143.387116596 podStartE2EDuration="2m23.387116596s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:04.384063857 +0000 UTC m=+166.383013116" watchObservedRunningTime="2025-11-24 08:00:04.387116596 +0000 UTC m=+166.386065845" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.416881 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.429187 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:04.92917025 +0000 UTC m=+166.928119499 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.442820 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" podStartSLOduration=143.442793336 podStartE2EDuration="2m23.442793336s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:04.432837676 +0000 UTC m=+166.431786935" watchObservedRunningTime="2025-11-24 08:00:04.442793336 +0000 UTC m=+166.441742585" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.457000 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rs5qs" podStartSLOduration=143.456978079 podStartE2EDuration="2m23.456978079s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:04.455106214 +0000 UTC m=+166.454055463" watchObservedRunningTime="2025-11-24 08:00:04.456978079 +0000 UTC m=+166.455927338" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.518056 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.518299 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.018269162 +0000 UTC m=+167.017218411 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.518398 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.518775 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.018765086 +0000 UTC m=+167.017714405 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.619628 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.619882 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.119854148 +0000 UTC m=+167.118803387 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.620028 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.620379 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.120370833 +0000 UTC m=+167.119320082 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.720923 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.721143 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.221112764 +0000 UTC m=+167.220062013 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.721556 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.721884 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.221872496 +0000 UTC m=+167.220821745 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.775200 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b5b763e-263b-49e8-80dd-6be14733ef80" path="/var/lib/kubelet/pods/6b5b763e-263b-49e8-80dd-6be14733ef80/volumes" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.823086 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.823245 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.323219734 +0000 UTC m=+167.322168983 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.823832 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.824236 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.324225144 +0000 UTC m=+167.323174393 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.924991 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.925205 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.42516505 +0000 UTC m=+167.424114299 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.925476 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.925643 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:04 crc kubenswrapper[4691]: E1124 08:00:04.926025 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.426016835 +0000 UTC m=+167.424966084 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:04 crc kubenswrapper[4691]: I1124 08:00:04.933365 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21147e4f-4335-4c12-9a81-aa333d8301db-metrics-certs\") pod \"network-metrics-daemon-98whr\" (UID: \"21147e4f-4335-4c12-9a81-aa333d8301db\") " pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.026610 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.026785 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.526754036 +0000 UTC m=+167.525703295 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.027059 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.027396 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.527382455 +0000 UTC m=+167.526331704 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.086686 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-98whr" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.128250 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.128440 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.628413044 +0000 UTC m=+167.627362293 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.128652 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.128977 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.62896188 +0000 UTC m=+167.627911129 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.141004 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 08:00:05 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld Nov 24 08:00:05 crc kubenswrapper[4691]: [+]process-running ok Nov 24 08:00:05 crc kubenswrapper[4691]: healthz check failed Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.141363 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.230208 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.230571 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.730554076 +0000 UTC m=+167.729503325 (durationBeforeRetry 500ms). 
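
The router's startup probe above fails with HTTP 500 and a body that lists each sub-check ([-]backend-http failed, [-]has-synced failed, [+]process-running ok, then "healthz check failed"). A sketch of an aggregated healthz handler in that style, with hypothetical check names; any one failing check turns the whole response into a 500:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"sort"
)

// Aggregated health endpoint in the style of the router's startup-probe
// output: each named check prints [+]name ok or [-]name failed, and any
// failure makes the whole response an HTTP 500.
func healthz(checks map[string]func() error) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		names := make([]string, 0, len(checks))
		for name := range checks {
			names = append(names, name)
		}
		sort.Strings(names) // deterministic ordering for the response body
		body, failed := "", false
		for _, name := range names {
			if err := checks[name](); err != nil {
				failed = true
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", name)
			} else {
				body += fmt.Sprintf("[+]%s ok\n", name)
			}
		}
		if failed {
			w.WriteHeader(http.StatusInternalServerError)
			body += "healthz check failed\n"
		}
		fmt.Fprint(w, body)
	}
}

func main() {
	// Hypothetical checks standing in for the router's backend-http,
	// has-synced and process-running sub-checks.
	checks := map[string]func() error{
		"process-running": func() error { return nil },
		"has-synced":      func() error { return fmt.Errorf("not synced") },
	}
	http.HandleFunc("/healthz", healthz(checks))
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil)) // hypothetical local port
}
```
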
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.336244 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.336897 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.836880698 +0000 UTC m=+167.835829947 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.407287 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" event={"ID":"12724cb5-e0ed-4c92-93e6-0f223dd11bea","Type":"ContainerStarted","Data":"30d93f88944b1334e1891735ea11755395ba13eabf30225fa6e0d454bbd222af"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.409649 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" event={"ID":"5ccb619a-2f5c-4b42-9dbc-00479b290b3a","Type":"ContainerStarted","Data":"aa280c81ffb9187776e3ab7faf22e305218bf724efbb3e1756e8ac2f5fc938b5"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.410441 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.411938 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" event={"ID":"9c8dc88a-6052-4b16-90e4-7377d8d6969d","Type":"ContainerStarted","Data":"831c491ac4a97404fbb55980a8a0d0f7395946b7d2ad6d5983ea7c63e8e727e0"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.412747 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.415631 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" event={"ID":"4f6239ed-b85f-4648-b4bf-aedce4312e26","Type":"ContainerStarted","Data":"61a3395466329254a591babc48fbf0f557c5a73ad87c1085dc2b693d2674709f"} Nov 24 08:00:05 
crc kubenswrapper[4691]: I1124 08:00:05.425648 4691 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-vq867 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body= Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.425707 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" podUID="9c8dc88a-6052-4b16-90e4-7377d8d6969d" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.432536 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k5dmp" podStartSLOduration=144.432517231 podStartE2EDuration="2m24.432517231s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:04.50615979 +0000 UTC m=+166.505109049" watchObservedRunningTime="2025-11-24 08:00:05.432517231 +0000 UTC m=+167.431466480" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.437735 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.438275 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:05.938259428 +0000 UTC m=+167.937208667 (durationBeforeRetry 500ms). 
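
Several readiness probes in this window (controller-manager, console-operator, downloads, and catalog-operator just above) fail with "connect: connection refused" immediately after their ContainerStarted events: kubelet starts probing as soon as the container runs, before the process inside has bound its port. A sketch of a single HTTP probe attempt in that spirit, using a hypothetical local URL in place of the pod IPs from the log; the real prober additionally honors the pod's TLS settings, timeouts, and success/failure thresholds:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// One HTTP readiness-probe attempt: any transport error (such as
// "connect: connection refused" while the server is still starting)
// or a non-2xx/3xx status counts as a failure.
func probeOnce(url string) string {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Sprintf("failure: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return "success"
	}
	return fmt.Sprintf("failure: HTTP status %d", resp.StatusCode)
}

func main() {
	// Hypothetical endpoint; in the log the targets are pod IPs such as
	// https://10.217.0.33:8443/healthz.
	fmt.Println(probeOnce("http://127.0.0.1:8080/healthz"))
}
```
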
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.447310 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sr2d4" event={"ID":"a53c972e-9ea8-4b23-b43b-e432f037faec","Type":"ContainerStarted","Data":"eda407533a8b00db8b4d62d50d2f189038fd8f72c092787e8a9a7991231869d9"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.448735 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-m8bj7" podStartSLOduration=143.448724472 podStartE2EDuration="2m23.448724472s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.428831584 +0000 UTC m=+167.427780833" watchObservedRunningTime="2025-11-24 08:00:05.448724472 +0000 UTC m=+167.447673721" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.452462 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-98whr"] Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.452494 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" podStartSLOduration=143.452484512 podStartE2EDuration="2m23.452484512s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.449495405 +0000 UTC m=+167.448444654" watchObservedRunningTime="2025-11-24 08:00:05.452484512 +0000 UTC m=+167.451433751" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.464801 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" event={"ID":"72c587e8-bc91-4e8f-a545-e01d47139d1d","Type":"ContainerStarted","Data":"b66da3efaff2f4a42d58550587f53890f6a61eb998bd1c65fb6cf3a611c0a8c0"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.469333 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wn248" podStartSLOduration=144.469317501 podStartE2EDuration="2m24.469317501s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.468005723 +0000 UTC m=+167.466954982" watchObservedRunningTime="2025-11-24 08:00:05.469317501 +0000 UTC m=+167.468266740" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.479979 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" event={"ID":"472a00a0-2e80-44f7-8857-348d3d88ab01","Type":"ContainerStarted","Data":"77c8dfb5f071c14156e3fe8933a858407d1915b1acacc45b0709b708f55fc215"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.487374 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" podStartSLOduration=143.487355916 podStartE2EDuration="2m23.487355916s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.48575071 +0000 UTC m=+167.484699949" watchObservedRunningTime="2025-11-24 08:00:05.487355916 +0000 UTC m=+167.486305165" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.494304 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" event={"ID":"85d176b6-4ccf-4790-b1c6-08831a67e03c","Type":"ContainerStarted","Data":"a70867f02f1253467e3f24d09e8cff9ac7fa3a7dedf4ba748d9d958c357f34af"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.512424 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb" event={"ID":"a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45","Type":"ContainerStarted","Data":"eb784f3bd6fcb12a3f645a1b8dec8437b5fc84ce142c91676514f0b07fb83c05"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.512627 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb" event={"ID":"a8eb4c07-8b4c-44cc-8dbf-f102ef2baa45","Type":"ContainerStarted","Data":"5eec73783f440f777f11d2e97ad31c55d8f4409653cf7b327011d3dea103cf49"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.530986 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" event={"ID":"4db5010a-367e-473a-8e9e-56febfd76781","Type":"ContainerStarted","Data":"a7a59daf5adb71692e3d826c6f891e53b1cc9290fe56a84369e1fd936df50aa1"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.531065 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" podStartSLOduration=5.531041577 podStartE2EDuration="5.531041577s" podCreationTimestamp="2025-11-24 08:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.506858074 +0000 UTC m=+167.505807343" watchObservedRunningTime="2025-11-24 08:00:05.531041577 +0000 UTC m=+167.529990836" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.538493 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" event={"ID":"c592727d-7307-468a-843f-01717f868d4e","Type":"ContainerStarted","Data":"769a08d18cce9029503e0f69096bc524b627ba966c085f001b7b19e5a7b60302"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.541132 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" event={"ID":"4149c672-979a-4602-9f51-d0718d65d99a","Type":"ContainerStarted","Data":"274594e29db0efb85e6632cbb4b32a1d4c6c34bfc2e7fb0427ce7e1911b1ad5d"} Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.541498 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.542007 4691 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-q96bx container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused" start-of-body= Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.542076 4691 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-7p2br container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.542315 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" podUID="bd92380d-9848-49d8-9feb-07b71c7729bb" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.28:5443/healthz\": dial tcp 10.217.0.28:5443: connect: connection refused" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.542393 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" podUID="53aa0d7f-022e-46a2-9e47-442eca753bbc" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.542812 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.042793129 +0000 UTC m=+168.041742378 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.543265 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.543298 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.546607 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-c8zrh" podStartSLOduration=143.54658962 podStartE2EDuration="2m23.54658962s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.530746919 +0000 UTC m=+167.529696168" watchObservedRunningTime="2025-11-24 08:00:05.54658962 +0000 UTC m=+167.545538869" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.556853 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-lsqrb" podStartSLOduration=143.556827658 podStartE2EDuration="2m23.556827658s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.545441156 +0000 UTC m=+167.544390405" watchObservedRunningTime="2025-11-24 08:00:05.556827658 +0000 UTC m=+167.555776907" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.585685 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9znlm" podStartSLOduration=144.585663457 podStartE2EDuration="2m24.585663457s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.581613189 +0000 UTC m=+167.580562468" watchObservedRunningTime="2025-11-24 08:00:05.585663457 +0000 UTC m=+167.584612706" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.610489 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-wrdd6" podStartSLOduration=143.610467278 podStartE2EDuration="2m23.610467278s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.607362338 +0000 UTC m=+167.606311587" watchObservedRunningTime="2025-11-24 08:00:05.610467278 +0000 UTC 
m=+167.609416537" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.645074 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.645307 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.145274621 +0000 UTC m=+168.144223870 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.645969 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.651513 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.151489222 +0000 UTC m=+168.150438471 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.660956 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" podStartSLOduration=143.660935397 podStartE2EDuration="2m23.660935397s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.641942724 +0000 UTC m=+167.640891983" watchObservedRunningTime="2025-11-24 08:00:05.660935397 +0000 UTC m=+167.659884646" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.756124 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.756659 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.256640471 +0000 UTC m=+168.255589720 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.784782 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-744kk" podStartSLOduration=143.784762729 podStartE2EDuration="2m23.784762729s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.700106786 +0000 UTC m=+167.699056035" watchObservedRunningTime="2025-11-24 08:00:05.784762729 +0000 UTC m=+167.783711978" Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.858857 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.859209 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.359197845 +0000 UTC m=+168.358147094 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.960249 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.960478 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.46043213 +0000 UTC m=+168.459381379 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:05 crc kubenswrapper[4691]: I1124 08:00:05.960643 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:05 crc kubenswrapper[4691]: E1124 08:00:05.961149 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.461136731 +0000 UTC m=+168.460085980 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.062060 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:06 crc kubenswrapper[4691]: E1124 08:00:06.062284 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.562246873 +0000 UTC m=+168.561196122 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.062491 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:06 crc kubenswrapper[4691]: E1124 08:00:06.062986 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.562974744 +0000 UTC m=+168.561924213 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.127544 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-zw9jm" Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.134717 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 08:00:06 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld Nov 24 08:00:06 crc kubenswrapper[4691]: [+]process-running ok Nov 24 08:00:06 crc kubenswrapper[4691]: healthz check failed Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.134787 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.142551 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-7shsd" podStartSLOduration=144.142531369 podStartE2EDuration="2m24.142531369s" podCreationTimestamp="2025-11-24 07:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:05.794844043 +0000 UTC m=+167.793793292" watchObservedRunningTime="2025-11-24 08:00:06.142531369 +0000 UTC m=+168.141480618" Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.164095 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" 
(UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:06 crc kubenswrapper[4691]: E1124 08:00:06.164397 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.664360834 +0000 UTC m=+168.663310083 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.164563 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:06 crc kubenswrapper[4691]: E1124 08:00:06.164890 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.664873179 +0000 UTC m=+168.663822498 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.266422 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:06 crc kubenswrapper[4691]: E1124 08:00:06.266593 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.766563007 +0000 UTC m=+168.765512256 (durationBeforeRetry 500ms). 
Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.380819 4691 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-sph7r container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.380866 4691 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-sph7r container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.380892 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" podUID="278ddf9e-14a8-43dd-820a-bfda668bbce1" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused"
Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.380941 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" podUID="278ddf9e-14a8-43dd-820a-bfda668bbce1" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused"
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:06 crc kubenswrapper[4691]: E1124 08:00:06.470161 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:06.97014464 +0000 UTC m=+168.969093889 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.547946 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" event={"ID":"4db5010a-367e-473a-8e9e-56febfd76781","Type":"ContainerStarted","Data":"73c0af5bc09519729164b7c568094a451e68fa68738a41c3386f0d8d7ace2634"} Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.549519 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-98whr" event={"ID":"21147e4f-4335-4c12-9a81-aa333d8301db","Type":"ContainerStarted","Data":"cc63ae0712503eb55301d45c785b237be51a124147381899387de04c3ac2859f"} Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.549571 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-98whr" event={"ID":"21147e4f-4335-4c12-9a81-aa333d8301db","Type":"ContainerStarted","Data":"f0309a93ecd931d5a9d84affbafecf5f187ada9cbe78be22e0eaf365d1600c74"} Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.551110 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sr2d4" event={"ID":"a53c972e-9ea8-4b23-b43b-e432f037faec","Type":"ContainerStarted","Data":"54d77b8f8a9df444cb4858f7f649221448656d42c0b4ec3b06f3e2d280b6921d"} Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.551987 4691 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-vq867 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body= Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.552063 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" podUID="9c8dc88a-6052-4b16-90e4-7377d8d6969d" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.571579 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:06 crc 
Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.581483 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" podStartSLOduration=145.581465819 podStartE2EDuration="2m25.581465819s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:06.57702387 +0000 UTC m=+168.575973119" watchObservedRunningTime="2025-11-24 08:00:06.581465819 +0000 UTC m=+168.580415068"
Nov 24 08:00:06 crc kubenswrapper[4691]: I1124 08:00:06.599425 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-sr2d4" podStartSLOduration=8.599409031 podStartE2EDuration="8.599409031s" podCreationTimestamp="2025-11-24 07:59:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:06.5973144 +0000 UTC m=+168.596263649" watchObservedRunningTime="2025-11-24 08:00:06.599409031 +0000 UTC m=+168.598358280"
Nov 24 08:00:07 crc kubenswrapper[4691]: I1124 08:00:07.134439 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:07 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:07 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:07 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:07 crc kubenswrapper[4691]: I1124 08:00:07.134526 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:07 crc kubenswrapper[4691]: I1124 08:00:07.559861 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-22hbl" event={"ID":"f1001a2a-f7b8-46cc-b8e8-852fffb997e5","Type":"ContainerStarted","Data":"4a9d5021f6fae684717cfd593adcfbba01aae2a842ab82bab257f7ed5d120feb"}
Nov 24 08:00:07 crc kubenswrapper[4691]: I1124 08:00:07.561844 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-98whr" event={"ID":"21147e4f-4335-4c12-9a81-aa333d8301db","Type":"ContainerStarted","Data":"c06d915d2dbac0ea0bbf7f969012c36d0598a4e97ba79caa365090d9b7f84165"}
Nov 24 08:00:07 crc kubenswrapper[4691]: I1124 08:00:07.562735 4691 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-vq867 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body=
Nov 24 08:00:07 crc kubenswrapper[4691]: I1124 08:00:07.562805 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" podUID="9c8dc88a-6052-4b16-90e4-7377d8d6969d" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused"
Nov 24 08:00:07 crc kubenswrapper[4691]: I1124 08:00:07.563028 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-sr2d4"
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:07 crc kubenswrapper[4691]: E1124 08:00:07.593624 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:08.093578686 +0000 UTC m=+170.092527955 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:07 crc kubenswrapper[4691]: I1124 08:00:07.593846 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:07 crc kubenswrapper[4691]: E1124 08:00:07.594467 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:08.094425131 +0000 UTC m=+170.093374570 (durationBeforeRetry 500ms). 
Nov 24 08:00:07 crc kubenswrapper[4691]: I1124 08:00:07.594720 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-98whr" podStartSLOduration=146.594693809 podStartE2EDuration="2m26.594693809s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:07.592086153 +0000 UTC m=+169.591035402" watchObservedRunningTime="2025-11-24 08:00:07.594693809 +0000 UTC m=+169.593643058"
Nov 24 08:00:08 crc kubenswrapper[4691]: I1124 08:00:08.134197 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:08 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:08 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:08 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:08 crc kubenswrapper[4691]: I1124 08:00:08.134696 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:08 crc kubenswrapper[4691]: I1124 08:00:08.568409 4691 generic.go:334] "Generic (PLEG): container finished" podID="72c587e8-bc91-4e8f-a545-e01d47139d1d" containerID="b66da3efaff2f4a42d58550587f53890f6a61eb998bd1c65fb6cf3a611c0a8c0" exitCode=0
Nov 24 08:00:08 crc kubenswrapper[4691]: I1124 08:00:08.568484 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" event={"ID":"72c587e8-bc91-4e8f-a545-e01d47139d1d","Type":"ContainerDied","Data":"b66da3efaff2f4a42d58550587f53890f6a61eb998bd1c65fb6cf3a611c0a8c0"}
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.025733 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.026001 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:09.52596996 +0000 UTC m=+171.524919209 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.026569 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.027026 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:09.527006771 +0000 UTC m=+171.525956020 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.127980 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.128416 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:09.62839194 +0000 UTC m=+171.627341189 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.128836 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.129261 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:09.629251665 +0000 UTC m=+171.628200914 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.136636 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 08:00:09 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld Nov 24 08:00:09 crc kubenswrapper[4691]: [+]process-running ok Nov 24 08:00:09 crc kubenswrapper[4691]: healthz check failed Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.137071 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.231190 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.232862 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:09.732838079 +0000 UTC m=+171.731787328 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.333971 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.334904 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:09.834888888 +0000 UTC m=+171.833838137 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.388323 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-sph7r" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.443130 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.443807 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:09.943769826 +0000 UTC m=+171.942719075 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.443994 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.444566 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:09.944550739 +0000 UTC m=+171.943499988 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.545765 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.546038 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.04599423 +0000 UTC m=+172.044943479 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.546336 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.546832 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.046820255 +0000 UTC m=+172.045769674 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.647908 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.648130 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.148092711 +0000 UTC m=+172.147041970 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.648245 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.648655 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.148639387 +0000 UTC m=+172.147588626 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.734883 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5r6nd"] Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.736359 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.743049 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.749003 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.749518 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.249498481 +0000 UTC m=+172.248447730 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.751983 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5r6nd"] Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.850973 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-catalog-content\") pod \"certified-operators-5r6nd\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.851041 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.851134 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dlgz\" (UniqueName: \"kubernetes.io/projected/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-kube-api-access-2dlgz\") pod \"certified-operators-5r6nd\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.851187 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-utilities\") pod \"certified-operators-5r6nd\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.851538 4691 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.35151543 +0000 UTC m=+172.350464679 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.936515 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pnnmn"] Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.937414 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.944702 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.952441 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.953032 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dlgz\" (UniqueName: \"kubernetes.io/projected/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-kube-api-access-2dlgz\") pod \"certified-operators-5r6nd\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.953086 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-utilities\") pod \"certified-operators-5r6nd\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.953123 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-catalog-content\") pod \"certified-operators-5r6nd\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.953720 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-catalog-content\") pod \"certified-operators-5r6nd\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:09 crc kubenswrapper[4691]: E1124 08:00:09.953836 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-24 08:00:10.453810386 +0000 UTC m=+172.452759635 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.954358 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-utilities\") pod \"certified-operators-5r6nd\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.956165 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pnnmn"] Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.981067 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.981723 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.987648 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 24 08:00:09 crc kubenswrapper[4691]: I1124 08:00:09.987804 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.001782 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dlgz\" (UniqueName: \"kubernetes.io/projected/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-kube-api-access-2dlgz\") pod \"certified-operators-5r6nd\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.043102 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.056133 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-catalog-content\") pod \"community-operators-pnnmn\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.056188 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpq4c\" (UniqueName: \"kubernetes.io/projected/9312bc5d-54a5-4172-9674-1afebef9cc98-kube-api-access-cpq4c\") pod \"community-operators-pnnmn\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.056241 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.056273 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-utilities\") pod \"community-operators-pnnmn\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:00:10 crc kubenswrapper[4691]: E1124 08:00:10.056768 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.556749441 +0000 UTC m=+172.555698690 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.061953 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.135070 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mszmq"] Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.143946 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 08:00:10 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld Nov 24 08:00:10 crc kubenswrapper[4691]: [+]process-running ok Nov 24 08:00:10 crc kubenswrapper[4691]: healthz check failed Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.144010 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.146736 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.158142 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.158479 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-utilities\") pod \"community-operators-pnnmn\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.158528 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.158591 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.158626 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-catalog-content\") pod \"community-operators-pnnmn\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.158652 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpq4c\" (UniqueName: \"kubernetes.io/projected/9312bc5d-54a5-4172-9674-1afebef9cc98-kube-api-access-cpq4c\") pod \"community-operators-pnnmn\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:00:10 crc kubenswrapper[4691]: E1124 08:00:10.159137 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.659097408 +0000 UTC m=+172.658046657 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.161515 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-catalog-content\") pod \"community-operators-pnnmn\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.162068 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.162180 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-utilities\") pod \"community-operators-pnnmn\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.163350 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mszmq"] Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.201575 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpq4c\" (UniqueName: \"kubernetes.io/projected/9312bc5d-54a5-4172-9674-1afebef9cc98-kube-api-access-cpq4c\") pod \"community-operators-pnnmn\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.230134 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.260149 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72c587e8-bc91-4e8f-a545-e01d47139d1d-config-volume\") pod \"72c587e8-bc91-4e8f-a545-e01d47139d1d\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.260208 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skgjb\" (UniqueName: \"kubernetes.io/projected/72c587e8-bc91-4e8f-a545-e01d47139d1d-kube-api-access-skgjb\") pod \"72c587e8-bc91-4e8f-a545-e01d47139d1d\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.260475 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72c587e8-bc91-4e8f-a545-e01d47139d1d-secret-volume\") pod \"72c587e8-bc91-4e8f-a545-e01d47139d1d\" (UID: \"72c587e8-bc91-4e8f-a545-e01d47139d1d\") " Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.260728 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kubelet-dir\") pod 
\"revision-pruner-8-crc\" (UID: \"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.260776 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-utilities\") pod \"certified-operators-mszmq\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") " pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.260796 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-catalog-content\") pod \"certified-operators-mszmq\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") " pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.260833 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlpmq\" (UniqueName: \"kubernetes.io/projected/7cd069de-809b-4bdf-8dad-51f00b26dcb0-kube-api-access-xlpmq\") pod \"certified-operators-mszmq\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") " pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.260860 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.260915 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:10 crc kubenswrapper[4691]: E1124 08:00:10.261310 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.761293272 +0000 UTC m=+172.760242521 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.262283 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72c587e8-bc91-4e8f-a545-e01d47139d1d-config-volume" (OuterVolumeSpecName: "config-volume") pod "72c587e8-bc91-4e8f-a545-e01d47139d1d" (UID: "72c587e8-bc91-4e8f-a545-e01d47139d1d"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.262603 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.274119 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72c587e8-bc91-4e8f-a545-e01d47139d1d-kube-api-access-skgjb" (OuterVolumeSpecName: "kube-api-access-skgjb") pod "72c587e8-bc91-4e8f-a545-e01d47139d1d" (UID: "72c587e8-bc91-4e8f-a545-e01d47139d1d"). InnerVolumeSpecName "kube-api-access-skgjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.275098 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72c587e8-bc91-4e8f-a545-e01d47139d1d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "72c587e8-bc91-4e8f-a545-e01d47139d1d" (UID: "72c587e8-bc91-4e8f-a545-e01d47139d1d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.288277 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.294912 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.335801 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bx62n"] Nov 24 08:00:10 crc kubenswrapper[4691]: E1124 08:00:10.336021 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72c587e8-bc91-4e8f-a545-e01d47139d1d" containerName="collect-profiles" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.336036 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="72c587e8-bc91-4e8f-a545-e01d47139d1d" containerName="collect-profiles" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.336150 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="72c587e8-bc91-4e8f-a545-e01d47139d1d" containerName="collect-profiles" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.336959 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.338636 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.359487 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.360297 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.362204 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:10 crc kubenswrapper[4691]: E1124 08:00:10.362609 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.862573389 +0000 UTC m=+172.861522638 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.362643 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.362701 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.362855 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.362951 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-utilities\") pod \"certified-operators-mszmq\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") " pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.362986 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-catalog-content\") pod \"certified-operators-mszmq\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") " pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.363030 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlpmq\" (UniqueName: \"kubernetes.io/projected/7cd069de-809b-4bdf-8dad-51f00b26dcb0-kube-api-access-xlpmq\") pod \"certified-operators-mszmq\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") " pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.363428 4691 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/72c587e8-bc91-4e8f-a545-e01d47139d1d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.365425 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-utilities\") pod \"certified-operators-mszmq\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") " pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.365667 4691 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72c587e8-bc91-4e8f-a545-e01d47139d1d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.365687 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-catalog-content\") pod \"certified-operators-mszmq\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") " pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:10 crc kubenswrapper[4691]: E1124 08:00:10.365931 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.865912086 +0000 UTC m=+172.864861335 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.366101 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skgjb\" (UniqueName: \"kubernetes.io/projected/72c587e8-bc91-4e8f-a545-e01d47139d1d-kube-api-access-skgjb\") on node \"crc\" DevicePath \"\"" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.368134 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.370799 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.378932 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bx62n"] Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.392809 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.403523 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlpmq\" (UniqueName: \"kubernetes.io/projected/7cd069de-809b-4bdf-8dad-51f00b26dcb0-kube-api-access-xlpmq\") pod \"certified-operators-mszmq\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") " pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.411885 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.413156 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.431121 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.450677 4691 patch_prober.go:28] interesting pod/console-f9d7485db-h9wgf container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.450748 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-h9wgf" podUID="d611a7c5-68d8-4ea5-88b7-d3fad9baef65" containerName="console" probeResult="failure" output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.467797 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.468534 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"310b3b1c-76c2-4eca-afab-78c468d3b2a8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.468568 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-utilities\") pod \"community-operators-bx62n\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.468646 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-catalog-content\") pod \"community-operators-bx62n\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.468683 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcvc7\" (UniqueName: \"kubernetes.io/projected/d3ebf582-98e0-4899-885b-22a4289b2b4d-kube-api-access-wcvc7\") pod \"community-operators-bx62n\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.468708 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"310b3b1c-76c2-4eca-afab-78c468d3b2a8\") " 
pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: E1124 08:00:10.468843 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:10.96882523 +0000 UTC m=+172.967774479 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.476956 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.572201 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-catalog-content\") pod \"community-operators-bx62n\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.572274 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.572296 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcvc7\" (UniqueName: \"kubernetes.io/projected/d3ebf582-98e0-4899-885b-22a4289b2b4d-kube-api-access-wcvc7\") pod \"community-operators-bx62n\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.572324 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"310b3b1c-76c2-4eca-afab-78c468d3b2a8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.572430 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"310b3b1c-76c2-4eca-afab-78c468d3b2a8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.572480 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-utilities\") pod \"community-operators-bx62n\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:10 
crc kubenswrapper[4691]: I1124 08:00:10.573723 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-catalog-content\") pod \"community-operators-bx62n\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:10 crc kubenswrapper[4691]: E1124 08:00:10.574241 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:11.074228437 +0000 UTC m=+173.073177686 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.575324 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"310b3b1c-76c2-4eca-afab-78c468d3b2a8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.578840 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-utilities\") pod \"community-operators-bx62n\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.604729 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" event={"ID":"72c587e8-bc91-4e8f-a545-e01d47139d1d","Type":"ContainerDied","Data":"23fa03fc0bec2c3eb621b43324cc677243d819e14834ab45f1c17cf9539df9df"} Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.604812 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23fa03fc0bec2c3eb621b43324cc677243d819e14834ab45f1c17cf9539df9df" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.604907 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.616042 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"310b3b1c-76c2-4eca-afab-78c468d3b2a8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.624408 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m459v" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.630375 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcvc7\" (UniqueName: \"kubernetes.io/projected/d3ebf582-98e0-4899-885b-22a4289b2b4d-kube-api-access-wcvc7\") pod \"community-operators-bx62n\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.645909 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5r6nd"] Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.679288 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.679363 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.679382 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:10 crc kubenswrapper[4691]: E1124 08:00:10.679943 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:11.179922782 +0000 UTC m=+173.178872031 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.680039 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.681156 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.681196 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.690997 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.738946 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.738990 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.755019 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.790322 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:10 crc kubenswrapper[4691]: E1124 08:00:10.791774 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:11.291762056 +0000 UTC m=+173.290711305 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.893787 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:10 crc kubenswrapper[4691]: E1124 08:00:10.895657 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-24 08:00:11.395639188 +0000 UTC m=+173.394588437 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.925640 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pnnmn"] Nov 24 08:00:10 crc kubenswrapper[4691]: I1124 08:00:10.971111 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:10.997810 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:11 crc kubenswrapper[4691]: E1124 08:00:10.998144 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:11.49813084 +0000 UTC m=+173.497080089 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.099376 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:11 crc kubenswrapper[4691]: E1124 08:00:11.099844 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:11.599826709 +0000 UTC m=+173.598775958 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.131262 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-njfp2" Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.139673 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 08:00:11 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld Nov 24 08:00:11 crc kubenswrapper[4691]: [+]process-running ok Nov 24 08:00:11 crc kubenswrapper[4691]: healthz check failed Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.139734 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.164287 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mszmq"] Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.203230 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-q96bx" Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.212248 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:11 crc kubenswrapper[4691]: E1124 08:00:11.213884 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:11.713868737 +0000 UTC m=+173.712817986 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:11 crc kubenswrapper[4691]: W1124 08:00:11.239498 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7cd069de_809b_4bdf_8dad_51f00b26dcb0.slice/crio-b61c83c955cebff1d65286977cf223aa1b707ffb229bfe422a0ff1953f6368f3 WatchSource:0}: Error finding container b61c83c955cebff1d65286977cf223aa1b707ffb229bfe422a0ff1953f6368f3: Status 404 returned error can't find the container with id b61c83c955cebff1d65286977cf223aa1b707ffb229bfe422a0ff1953f6368f3 Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.314736 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:11 crc kubenswrapper[4691]: E1124 08:00:11.315705 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:11.815689559 +0000 UTC m=+173.814638808 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.418012 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:11 crc kubenswrapper[4691]: E1124 08:00:11.418382 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:11.918367547 +0000 UTC m=+173.917316796 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.460725 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.505206 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bx62n"] Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.510489 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-vq867" Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.519305 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:11 crc kubenswrapper[4691]: E1124 08:00:11.519807 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.019791538 +0000 UTC m=+174.018740787 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:11 crc kubenswrapper[4691]: W1124 08:00:11.550764 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd3ebf582_98e0_4899_885b_22a4289b2b4d.slice/crio-f1a29cdefc91e0335d00ac1d2e823da9b5f4a33b1f6bb9daf0813b6f318329ad WatchSource:0}: Error finding container f1a29cdefc91e0335d00ac1d2e823da9b5f4a33b1f6bb9daf0813b6f318329ad: Status 404 returned error can't find the container with id f1a29cdefc91e0335d00ac1d2e823da9b5f4a33b1f6bb9daf0813b6f318329ad Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.620369 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c","Type":"ContainerStarted","Data":"668735ffdfc67d9caea2efebb15ecf66163bcf4c4b70240d6b0151c8475aed4c"} Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.620954 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:11 crc kubenswrapper[4691]: E1124 08:00:11.621536 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.121517507 +0000 UTC m=+174.120466756 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.632957 4691 generic.go:334] "Generic (PLEG): container finished" podID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerID="96806321aa83041c079d3bf2e4bb0f84dcabaea8204ecaff5dd74c45e4b0bb98" exitCode=0 Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.633259 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pnnmn" event={"ID":"9312bc5d-54a5-4172-9674-1afebef9cc98","Type":"ContainerDied","Data":"96806321aa83041c079d3bf2e4bb0f84dcabaea8204ecaff5dd74c45e4b0bb98"} Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.633293 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pnnmn" event={"ID":"9312bc5d-54a5-4172-9674-1afebef9cc98","Type":"ContainerStarted","Data":"f3ee436203739ba08ca4a5f9de7e007253fab5b7337c295a0e003566d95b022d"} Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.634718 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.645242 4691 generic.go:334] "Generic (PLEG): container finished" podID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" containerID="fb983d56ca99fd108b0e79acfee1af28612ccc9408fc295f4f212a09f4cfcae6" exitCode=0 Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.645352 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5r6nd" event={"ID":"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e","Type":"ContainerDied","Data":"fb983d56ca99fd108b0e79acfee1af28612ccc9408fc295f4f212a09f4cfcae6"} Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.645392 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5r6nd" event={"ID":"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e","Type":"ContainerStarted","Data":"255789470c056f5e4bc3553e3aef2defcf5c03a8630c60976f6f648ea2e6c055"} Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.649927 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bx62n" event={"ID":"d3ebf582-98e0-4899-885b-22a4289b2b4d","Type":"ContainerStarted","Data":"f1a29cdefc91e0335d00ac1d2e823da9b5f4a33b1f6bb9daf0813b6f318329ad"} Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.652126 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"310b3b1c-76c2-4eca-afab-78c468d3b2a8","Type":"ContainerStarted","Data":"674548eac6d1494b50bb8ac7af40c6bf22a21f41d213794a02a07ffff17b6ead"} Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.659270 4691 generic.go:334] "Generic (PLEG): container finished" podID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" containerID="6594b5b821388ecf9dd7bcdde7c6161d12feee7bc142f5b8ab8a965c72813a37" exitCode=0 Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.660158 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mszmq" 
event={"ID":"7cd069de-809b-4bdf-8dad-51f00b26dcb0","Type":"ContainerDied","Data":"6594b5b821388ecf9dd7bcdde7c6161d12feee7bc142f5b8ab8a965c72813a37"} Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.660193 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mszmq" event={"ID":"7cd069de-809b-4bdf-8dad-51f00b26dcb0","Type":"ContainerStarted","Data":"b61c83c955cebff1d65286977cf223aa1b707ffb229bfe422a0ff1953f6368f3"} Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.722953 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:11 crc kubenswrapper[4691]: E1124 08:00:11.724048 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.22402871 +0000 UTC m=+174.222977959 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.827731 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:11 crc kubenswrapper[4691]: E1124 08:00:11.829382 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.329366245 +0000 UTC m=+174.328315604 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.909527 4691 patch_prober.go:28] interesting pod/apiserver-76f77b778f-6b2ss container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 24 08:00:11 crc kubenswrapper[4691]: [+]log ok Nov 24 08:00:11 crc kubenswrapper[4691]: [+]etcd ok Nov 24 08:00:11 crc kubenswrapper[4691]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 24 08:00:11 crc kubenswrapper[4691]: [+]poststarthook/generic-apiserver-start-informers ok Nov 24 08:00:11 crc kubenswrapper[4691]: [+]poststarthook/max-in-flight-filter ok Nov 24 08:00:11 crc kubenswrapper[4691]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 24 08:00:11 crc kubenswrapper[4691]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 24 08:00:11 crc kubenswrapper[4691]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 24 08:00:11 crc kubenswrapper[4691]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Nov 24 08:00:11 crc kubenswrapper[4691]: [+]poststarthook/project.openshift.io-projectcache ok Nov 24 08:00:11 crc kubenswrapper[4691]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 24 08:00:11 crc kubenswrapper[4691]: [+]poststarthook/openshift.io-startinformers ok Nov 24 08:00:11 crc kubenswrapper[4691]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 24 08:00:11 crc kubenswrapper[4691]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 24 08:00:11 crc kubenswrapper[4691]: livez check failed Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.909603 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss" podUID="4db5010a-367e-473a-8e9e-56febfd76781" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.920137 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2t866"] Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.922740 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.924516 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.928703 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:11 crc kubenswrapper[4691]: E1124 08:00:11.928923 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.42888547 +0000 UTC m=+174.427834719 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.929039 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:11 crc kubenswrapper[4691]: E1124 08:00:11.930465 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.430420815 +0000 UTC m=+174.429370064 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:11 crc kubenswrapper[4691]: I1124 08:00:11.931916 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2t866"] Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.030409 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.030649 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.5306052 +0000 UTC m=+174.529554449 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.030751 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.030835 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgkzd\" (UniqueName: \"kubernetes.io/projected/50e404aa-af0b-471a-9289-ab9bb5317ffc-kube-api-access-zgkzd\") pod \"redhat-marketplace-2t866\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.031012 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-utilities\") pod \"redhat-marketplace-2t866\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.031202 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.531181336 +0000 UTC m=+174.530130585 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.031255 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-catalog-content\") pod \"redhat-marketplace-2t866\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.132119 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.132345 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-utilities\") pod \"redhat-marketplace-2t866\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.132430 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-catalog-content\") pod \"redhat-marketplace-2t866\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.132497 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgkzd\" (UniqueName: \"kubernetes.io/projected/50e404aa-af0b-471a-9289-ab9bb5317ffc-kube-api-access-zgkzd\") pod \"redhat-marketplace-2t866\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.132634 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.632599147 +0000 UTC m=+174.631548416 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.133022 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-utilities\") pod \"redhat-marketplace-2t866\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.133772 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-catalog-content\") pod \"redhat-marketplace-2t866\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.136696 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 24 08:00:12 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld Nov 24 08:00:12 crc kubenswrapper[4691]: [+]process-running ok Nov 24 08:00:12 crc kubenswrapper[4691]: healthz check failed Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.136765 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.162708 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgkzd\" (UniqueName: \"kubernetes.io/projected/50e404aa-af0b-471a-9289-ab9bb5317ffc-kube-api-access-zgkzd\") pod \"redhat-marketplace-2t866\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.233874 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.234370 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.734343357 +0000 UTC m=+174.733292766 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.248305 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.329622 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xk274"] Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.335914 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.337131 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.837094007 +0000 UTC m=+174.836043256 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.337633 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.339121 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.839096315 +0000 UTC m=+174.838045564 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.346364 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xk274"] Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.346575 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xk274" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.439590 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.439952 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.939898128 +0000 UTC m=+174.938847387 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.440120 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ph76\" (UniqueName: \"kubernetes.io/projected/a3e985ce-848b-49fc-99aa-8f9fc08820be-kube-api-access-5ph76\") pod \"redhat-marketplace-xk274\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") " pod="openshift-marketplace/redhat-marketplace-xk274" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.440262 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-catalog-content\") pod \"redhat-marketplace-xk274\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") " pod="openshift-marketplace/redhat-marketplace-xk274" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.440284 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-utilities\") pod \"redhat-marketplace-xk274\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") " pod="openshift-marketplace/redhat-marketplace-xk274" Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.440316 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.440679 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:12.9406643 +0000 UTC m=+174.939613549 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.485076 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2t866"] Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.541893 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.542119 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.04208985 +0000 UTC m=+175.041039099 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.542231 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-catalog-content\") pod \"redhat-marketplace-xk274\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") " pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.542267 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-utilities\") pod \"redhat-marketplace-xk274\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") " pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.542305 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.542347 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ph76\" (UniqueName: \"kubernetes.io/projected/a3e985ce-848b-49fc-99aa-8f9fc08820be-kube-api-access-5ph76\") pod \"redhat-marketplace-xk274\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") " pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.542841 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-catalog-content\") pod \"redhat-marketplace-xk274\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") " pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.542883 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-utilities\") pod \"redhat-marketplace-xk274\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") " pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.543044 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.043011217 +0000 UTC m=+175.041960676 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.564111 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ph76\" (UniqueName: \"kubernetes.io/projected/a3e985ce-848b-49fc-99aa-8f9fc08820be-kube-api-access-5ph76\") pod \"redhat-marketplace-xk274\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") " pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.644627 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.644744 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.144723336 +0000 UTC m=+175.143672585 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.645619 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.646133 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.146116257 +0000 UTC m=+175.145065506 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.669671 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c","Type":"ContainerStarted","Data":"f361bdf8c56e68aae85f8a766056b1444146d06bf6785362f1cb4ba82e3ec258"}
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.678092 4691 generic.go:334] "Generic (PLEG): container finished" podID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerID="118c467bfe209c3ffaf7438f15173d4514116afa5bdd3f0e84c4aa486c3dec7e" exitCode=0
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.678282 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bx62n" event={"ID":"d3ebf582-98e0-4899-885b-22a4289b2b4d","Type":"ContainerDied","Data":"118c467bfe209c3ffaf7438f15173d4514116afa5bdd3f0e84c4aa486c3dec7e"}
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.682416 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"310b3b1c-76c2-4eca-afab-78c468d3b2a8","Type":"ContainerStarted","Data":"4af608162ae44d7f508e1a9569d0bde8074cd2f483029b2ef41722c9a8f466cd"}
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.686314 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.686352 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2t866" event={"ID":"50e404aa-af0b-471a-9289-ab9bb5317ffc","Type":"ContainerStarted","Data":"573c89398c69dffdbd4752c8a40b50124e833972f1c44d4c46a252484aa75086"}
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.687979 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.687954444 podStartE2EDuration="3.687954444s" podCreationTimestamp="2025-11-24 08:00:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:12.686020688 +0000 UTC m=+174.684969937" watchObservedRunningTime="2025-11-24 08:00:12.687954444 +0000 UTC m=+174.686903693"
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.704932 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.704910557 podStartE2EDuration="2.704910557s" podCreationTimestamp="2025-11-24 08:00:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:12.704058283 +0000 UTC m=+174.703007542" watchObservedRunningTime="2025-11-24 08:00:12.704910557 +0000 UTC m=+174.703859806"
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.708436 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-22hbl" event={"ID":"f1001a2a-f7b8-46cc-b8e8-852fffb997e5","Type":"ContainerStarted","Data":"3c139addba3dd8b5c436bd77cb14bf2e8792382a79d6e3026aedcfc00f6b2477"}
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.708509 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-22hbl" event={"ID":"f1001a2a-f7b8-46cc-b8e8-852fffb997e5","Type":"ContainerStarted","Data":"d08e38da75db9e9c68f9212c3bbcc04362b26c68f326f672fdd246db76438ef7"}
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.746750 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.748149 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.248055913 +0000 UTC m=+175.247005172 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.848939 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.850345 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.350326458 +0000 UTC m=+175.349275797 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.914228 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cbnjg"]
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.915258 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.917665 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.950601 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:12 crc kubenswrapper[4691]: E1124 08:00:12.953321 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.453283724 +0000 UTC m=+175.452232973 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.984221 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cbnjg"]
Nov 24 08:00:12 crc kubenswrapper[4691]: I1124 08:00:12.989179 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xk274"]
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.037161 4691 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.056264 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6wk6\" (UniqueName: \"kubernetes.io/projected/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-kube-api-access-d6wk6\") pod \"redhat-operators-cbnjg\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.056353 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-catalog-content\") pod \"redhat-operators-cbnjg\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.056379 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-utilities\") pod \"redhat-operators-cbnjg\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.056427 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:13 crc kubenswrapper[4691]: E1124 08:00:13.056799 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.556786665 +0000 UTC m=+175.555735914 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.135072 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:13 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:13 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:13 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.135155 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.158202 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.158525 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6wk6\" (UniqueName: \"kubernetes.io/projected/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-kube-api-access-d6wk6\") pod \"redhat-operators-cbnjg\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.158606 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-catalog-content\") pod \"redhat-operators-cbnjg\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.158633 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-utilities\") pod \"redhat-operators-cbnjg\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:00:13 crc kubenswrapper[4691]: E1124 08:00:13.158974 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.658887756 +0000 UTC m=+175.657837145 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.159278 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-utilities\") pod \"redhat-operators-cbnjg\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.159513 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-catalog-content\") pod \"redhat-operators-cbnjg\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.182611 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6wk6\" (UniqueName: \"kubernetes.io/projected/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-kube-api-access-d6wk6\") pod \"redhat-operators-cbnjg\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.260479 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:13 crc kubenswrapper[4691]: E1124 08:00:13.260873 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.760860153 +0000 UTC m=+175.759809402 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.323215 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2twgj"]
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.327627 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.334840 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2twgj"]
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.371747 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:13 crc kubenswrapper[4691]: E1124 08:00:13.372479 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.872432149 +0000 UTC m=+175.871381398 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.394038 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.473312 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-utilities\") pod \"redhat-operators-2twgj\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") " pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.473353 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9grw\" (UniqueName: \"kubernetes.io/projected/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-kube-api-access-q9grw\") pod \"redhat-operators-2twgj\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") " pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.473415 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.473469 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-catalog-content\") pod \"redhat-operators-2twgj\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") " pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:00:13 crc kubenswrapper[4691]: E1124 08:00:13.473837 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:13.973825719 +0000 UTC m=+175.972774968 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.577159 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.577854 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9grw\" (UniqueName: \"kubernetes.io/projected/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-kube-api-access-q9grw\") pod \"redhat-operators-2twgj\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") " pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.577891 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-utilities\") pod \"redhat-operators-2twgj\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") " pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.577955 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-catalog-content\") pod \"redhat-operators-2twgj\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") " pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.578387 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-catalog-content\") pod \"redhat-operators-2twgj\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") " pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:00:13 crc kubenswrapper[4691]: E1124 08:00:13.578481 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:14.078465103 +0000 UTC m=+176.077414352 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.579048 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-utilities\") pod \"redhat-operators-2twgj\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") " pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.595131 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9grw\" (UniqueName: \"kubernetes.io/projected/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-kube-api-access-q9grw\") pod \"redhat-operators-2twgj\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") " pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.679933 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:13 crc kubenswrapper[4691]: E1124 08:00:13.680328 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:14.180313597 +0000 UTC m=+176.179262846 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.689617 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.719723 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xk274" event={"ID":"a3e985ce-848b-49fc-99aa-8f9fc08820be","Type":"ContainerStarted","Data":"eaa6a44815dc5fce735abe3b292d3266427d4748a79a17cd89148c5316b1c2e6"}
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.721647 4691 generic.go:334] "Generic (PLEG): container finished" podID="310b3b1c-76c2-4eca-afab-78c468d3b2a8" containerID="4af608162ae44d7f508e1a9569d0bde8074cd2f483029b2ef41722c9a8f466cd" exitCode=0
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.721719 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"310b3b1c-76c2-4eca-afab-78c468d3b2a8","Type":"ContainerDied","Data":"4af608162ae44d7f508e1a9569d0bde8074cd2f483029b2ef41722c9a8f466cd"}
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.723853 4691 generic.go:334] "Generic (PLEG): container finished" podID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerID="60ea4e33161425139b91adc2c72aa669d439ff8cb55fa445c03f68b13c5dd1dc" exitCode=0
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.724086 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2t866" event={"ID":"50e404aa-af0b-471a-9289-ab9bb5317ffc","Type":"ContainerDied","Data":"60ea4e33161425139b91adc2c72aa669d439ff8cb55fa445c03f68b13c5dd1dc"}
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.728898 4691 generic.go:334] "Generic (PLEG): container finished" podID="3fa49d03-3c6c-42c1-a0ce-d462695f1e2c" containerID="f361bdf8c56e68aae85f8a766056b1444146d06bf6785362f1cb4ba82e3ec258" exitCode=0
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.728938 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c","Type":"ContainerDied","Data":"f361bdf8c56e68aae85f8a766056b1444146d06bf6785362f1cb4ba82e3ec258"}
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.755829 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cbnjg"]
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.781931 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:13 crc kubenswrapper[4691]: E1124 08:00:13.783887 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:14.283835948 +0000 UTC m=+176.282785197 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.784694 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:13 crc kubenswrapper[4691]: E1124 08:00:13.792434 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:14.292403998 +0000 UTC m=+176.291353247 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.892354 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:13 crc kubenswrapper[4691]: E1124 08:00:13.893356 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-24 08:00:14.393298663 +0000 UTC m=+176.392247912 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.893433 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:13 crc kubenswrapper[4691]: E1124 08:00:13.893837 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-24 08:00:14.393813028 +0000 UTC m=+176.392762477 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-dbzsg" (UID: "cde6026c-736d-47f2-ab64-deb47de62820") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.910743 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2twgj"]
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.970094 4691 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-24T08:00:13.037555386Z","Handler":null,"Name":""}
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.974491 4691 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.974522 4691 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.994806 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 24 08:00:13 crc kubenswrapper[4691]: I1124 08:00:13.998138 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.096929 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.099937 4691 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.099978 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.143361 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-dbzsg\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.144052 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:14 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:14 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:14 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.144100 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.306928 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.532815 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dbzsg"]
Nov 24 08:00:14 crc kubenswrapper[4691]: W1124 08:00:14.542691 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcde6026c_736d_47f2_ab64_deb47de62820.slice/crio-9bb5b1286f39b355e49d0feff3367ab614c50aa2c0edd3331a57665150248e99 WatchSource:0}: Error finding container 9bb5b1286f39b355e49d0feff3367ab614c50aa2c0edd3331a57665150248e99: Status 404 returned error can't find the container with id 9bb5b1286f39b355e49d0feff3367ab614c50aa2c0edd3331a57665150248e99
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.737866 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" event={"ID":"cde6026c-736d-47f2-ab64-deb47de62820","Type":"ContainerStarted","Data":"9bb5b1286f39b355e49d0feff3367ab614c50aa2c0edd3331a57665150248e99"}
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.740285 4691 generic.go:334] "Generic (PLEG): container finished" podID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" containerID="18d64c0b0992a8e53fd7c91e4483ffb38a89a86e30966f6991ddce31fa5d9f6d" exitCode=0
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.740374 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2twgj" event={"ID":"d2f8a9dc-663e-4288-aa02-9e553a7b18a9","Type":"ContainerDied","Data":"18d64c0b0992a8e53fd7c91e4483ffb38a89a86e30966f6991ddce31fa5d9f6d"}
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.740439 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2twgj" event={"ID":"d2f8a9dc-663e-4288-aa02-9e553a7b18a9","Type":"ContainerStarted","Data":"c3a0f29939f046dcc1c879b7d3651058d457e713ea36ead672327514c4a9bef1"}
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.743237 4691 generic.go:334] "Generic (PLEG): container finished" podID="a3e985ce-848b-49fc-99aa-8f9fc08820be" containerID="8fdad7ad68e761db5244441649abdd1a2761268f94e9041b6cb07d219b632882" exitCode=0
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.743361 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xk274" event={"ID":"a3e985ce-848b-49fc-99aa-8f9fc08820be","Type":"ContainerDied","Data":"8fdad7ad68e761db5244441649abdd1a2761268f94e9041b6cb07d219b632882"}
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.745547 4691 generic.go:334] "Generic (PLEG): container finished" podID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerID="816346317d4822ba600849ea4d8245a561597e2559d11be8ff8f079de7c2d029" exitCode=0
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.745641 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cbnjg" event={"ID":"5c3c36fe-8ff0-4639-ae9c-e69785ea4611","Type":"ContainerDied","Data":"816346317d4822ba600849ea4d8245a561597e2559d11be8ff8f079de7c2d029"}
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.745671 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cbnjg" event={"ID":"5c3c36fe-8ff0-4639-ae9c-e69785ea4611","Type":"ContainerStarted","Data":"1b2d5cfe20c9fa75f3c8a4884d40174a134947300c4ca07264cc6bfac01711ca"}
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.750803 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-22hbl" event={"ID":"f1001a2a-f7b8-46cc-b8e8-852fffb997e5","Type":"ContainerStarted","Data":"cef7b71486c2e22a3b253081ffcc9d5e260556a7c69cafa540cdc439ec192dd1"}
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.774254 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.795197 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-22hbl" podStartSLOduration=16.795095021 podStartE2EDuration="16.795095021s" podCreationTimestamp="2025-11-24 07:59:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:14.791115355 +0000 UTC m=+176.790064624" watchObservedRunningTime="2025-11-24 08:00:14.795095021 +0000 UTC m=+176.794044280"
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.968338 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 24 08:00:14 crc kubenswrapper[4691]: I1124 08:00:14.993879 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.111132 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kubelet-dir\") pod \"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c\" (UID: \"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c\") "
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.111232 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3fa49d03-3c6c-42c1-a0ce-d462695f1e2c" (UID: "3fa49d03-3c6c-42c1-a0ce-d462695f1e2c"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.111234 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kube-api-access\") pod \"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c\" (UID: \"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c\") "
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.111299 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kubelet-dir\") pod \"310b3b1c-76c2-4eca-afab-78c468d3b2a8\" (UID: \"310b3b1c-76c2-4eca-afab-78c468d3b2a8\") "
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.111401 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kube-api-access\") pod \"310b3b1c-76c2-4eca-afab-78c468d3b2a8\" (UID: \"310b3b1c-76c2-4eca-afab-78c468d3b2a8\") "
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.111454 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "310b3b1c-76c2-4eca-afab-78c468d3b2a8" (UID: "310b3b1c-76c2-4eca-afab-78c468d3b2a8"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.111752 4691 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.111776 4691 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.116800 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3fa49d03-3c6c-42c1-a0ce-d462695f1e2c" (UID: "3fa49d03-3c6c-42c1-a0ce-d462695f1e2c"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.117689 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "310b3b1c-76c2-4eca-afab-78c468d3b2a8" (UID: "310b3b1c-76c2-4eca-afab-78c468d3b2a8"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.135270 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:15 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:15 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:15 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.135365 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.213160 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3fa49d03-3c6c-42c1-a0ce-d462695f1e2c-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.213244 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/310b3b1c-76c2-4eca-afab-78c468d3b2a8-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.756034 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss"
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.761809 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"310b3b1c-76c2-4eca-afab-78c468d3b2a8","Type":"ContainerDied","Data":"674548eac6d1494b50bb8ac7af40c6bf22a21f41d213794a02a07ffff17b6ead"}
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.761848 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="674548eac6d1494b50bb8ac7af40c6bf22a21f41d213794a02a07ffff17b6ead"
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.761934 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.765553 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-6b2ss"
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.766901 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3fa49d03-3c6c-42c1-a0ce-d462695f1e2c","Type":"ContainerDied","Data":"668735ffdfc67d9caea2efebb15ecf66163bcf4c4b70240d6b0151c8475aed4c"}
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.767101 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="668735ffdfc67d9caea2efebb15ecf66163bcf4c4b70240d6b0151c8475aed4c"
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.767285 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.776347 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" event={"ID":"cde6026c-736d-47f2-ab64-deb47de62820","Type":"ContainerStarted","Data":"c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5"}
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.776400 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg"
Nov 24 08:00:15 crc kubenswrapper[4691]: I1124 08:00:15.810303 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" podStartSLOduration=154.810257857 podStartE2EDuration="2m34.810257857s" podCreationTimestamp="2025-11-24 07:57:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:00:15.802606364 +0000 UTC m=+177.801555623" watchObservedRunningTime="2025-11-24 08:00:15.810257857 +0000 UTC m=+177.809207116"
Nov 24 08:00:16 crc kubenswrapper[4691]: I1124 08:00:16.144346 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:16 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:16 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:16 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:16 crc kubenswrapper[4691]: I1124 08:00:16.144431 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:16 crc kubenswrapper[4691]: I1124 08:00:16.623919 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-sr2d4"
Nov 24 08:00:17 crc kubenswrapper[4691]: I1124 08:00:17.137302 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:17 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:17 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:17 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:17 crc kubenswrapper[4691]: I1124 08:00:17.137388 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:18 crc kubenswrapper[4691]: I1124 08:00:18.135222 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:18 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:18 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:18 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:18 crc kubenswrapper[4691]: I1124 08:00:18.135292 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:19 crc kubenswrapper[4691]: I1124 08:00:19.135912 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:19 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:19 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:19 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:19 crc kubenswrapper[4691]: I1124 08:00:19.136220 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:20 crc kubenswrapper[4691]: I1124 08:00:20.134278 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:20 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:20 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:20 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:20 crc kubenswrapper[4691]: I1124 08:00:20.134692 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:20 crc kubenswrapper[4691]: I1124 08:00:20.410029 4691 patch_prober.go:28] interesting pod/console-f9d7485db-h9wgf container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Nov 24 08:00:20 crc kubenswrapper[4691]: I1124 08:00:20.410111 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-h9wgf" podUID="d611a7c5-68d8-4ea5-88b7-d3fad9baef65" containerName="console" probeResult="failure" output="Get \"https://10.217.0.10:8443/health\": dial tcp 10.217.0.10:8443: connect: connection refused"
Nov 24 08:00:20 crc kubenswrapper[4691]: I1124 08:00:20.677059 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body=
Nov 24 08:00:20 crc kubenswrapper[4691]: I1124 08:00:20.677241 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused"
Nov 24 08:00:20 crc kubenswrapper[4691]: I1124 08:00:20.678628 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body=
Nov 24 08:00:20 crc kubenswrapper[4691]: I1124 08:00:20.678712 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused"
Nov 24 08:00:21 crc kubenswrapper[4691]: I1124 08:00:21.090120 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 08:00:21 crc kubenswrapper[4691]: I1124 08:00:21.090197 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 08:00:21 crc kubenswrapper[4691]: I1124 08:00:21.133974 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:21 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:21 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:21 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:21 crc kubenswrapper[4691]: I1124 08:00:21.134046 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:22 crc kubenswrapper[4691]: I1124 08:00:22.135647 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:22 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:22 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:22 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:22 crc kubenswrapper[4691]: I1124 08:00:22.135711 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:23 crc kubenswrapper[4691]: I1124 08:00:23.133887 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:23 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:23 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:23 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:23 crc kubenswrapper[4691]: I1124 08:00:23.134353 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:24 crc kubenswrapper[4691]: I1124 08:00:24.134696 4691 patch_prober.go:28] interesting pod/router-default-5444994796-njfp2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 24 08:00:24 crc kubenswrapper[4691]: [-]has-synced failed: reason withheld
Nov 24 08:00:24 crc kubenswrapper[4691]: [+]process-running ok
Nov 24 08:00:24 crc kubenswrapper[4691]: healthz check failed
Nov 24 08:00:24 crc kubenswrapper[4691]: I1124 08:00:24.134770 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-njfp2" podUID="7ff52e52-aab5-4850-9d4d-4f427689c82b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 24 08:00:25 crc kubenswrapper[4691]: I1124 08:00:25.134742 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-njfp2"
Nov 24 08:00:25 crc kubenswrapper[4691]: I1124 08:00:25.137919 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-njfp2"
Nov 24 08:00:30 crc kubenswrapper[4691]: I1124 08:00:30.416695 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-h9wgf"
Nov 24 08:00:30 crc kubenswrapper[4691]: I1124 08:00:30.422497 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-h9wgf"
Nov 24 08:00:30 crc kubenswrapper[4691]: I1124 08:00:30.677120 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body=
Nov 24 08:00:30 crc kubenswrapper[4691]: I1124 08:00:30.677137 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body=
Nov 24 08:00:30 crc kubenswrapper[4691]: I1124 08:00:30.677199 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused"
Nov 24 08:00:30 crc kubenswrapper[4691]: I1124 08:00:30.677214 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused"
Nov 24 08:00:30 crc kubenswrapper[4691]: I1124 08:00:30.677284 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-7cng4"
Nov 24 08:00:30 crc kubenswrapper[4691]: I1124 08:00:30.678112 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server"
containerStatusID={"Type":"cri-o","ID":"c0be7e04985fd735591b53b6a109c8dfcf26f8e9acb7bddb3f92aa06f7c9dae6"} pod="openshift-console/downloads-7954f5f757-7cng4" containerMessage="Container download-server failed liveness probe, will be restarted" Nov 24 08:00:30 crc kubenswrapper[4691]: I1124 08:00:30.678222 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" containerID="cri-o://c0be7e04985fd735591b53b6a109c8dfcf26f8e9acb7bddb3f92aa06f7c9dae6" gracePeriod=2 Nov 24 08:00:30 crc kubenswrapper[4691]: I1124 08:00:30.678479 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 24 08:00:30 crc kubenswrapper[4691]: I1124 08:00:30.678623 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 24 08:00:31 crc kubenswrapper[4691]: I1124 08:00:31.893535 4691 generic.go:334] "Generic (PLEG): container finished" podID="1b8890e7-15c8-4467-b31b-493b565c584a" containerID="c0be7e04985fd735591b53b6a109c8dfcf26f8e9acb7bddb3f92aa06f7c9dae6" exitCode=0 Nov 24 08:00:31 crc kubenswrapper[4691]: I1124 08:00:31.893641 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7cng4" event={"ID":"1b8890e7-15c8-4467-b31b-493b565c584a","Type":"ContainerDied","Data":"c0be7e04985fd735591b53b6a109c8dfcf26f8e9acb7bddb3f92aa06f7c9dae6"} Nov 24 08:00:34 crc kubenswrapper[4691]: I1124 08:00:34.313827 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:00:37 crc kubenswrapper[4691]: E1124 08:00:37.330088 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 24 08:00:37 crc kubenswrapper[4691]: E1124 08:00:37.330980 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zgkzd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-2t866_openshift-marketplace(50e404aa-af0b-471a-9289-ab9bb5317ffc): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 08:00:37 crc kubenswrapper[4691]: E1124 08:00:37.332315 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-2t866" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" Nov 24 08:00:37 crc kubenswrapper[4691]: E1124 08:00:37.425597 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 24 08:00:37 crc kubenswrapper[4691]: E1124 08:00:37.425830 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cpq4c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-pnnmn_openshift-marketplace(9312bc5d-54a5-4172-9674-1afebef9cc98): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 08:00:37 crc kubenswrapper[4691]: E1124 08:00:37.427095 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-pnnmn" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" Nov 24 08:00:40 crc kubenswrapper[4691]: I1124 08:00:40.676385 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 24 08:00:40 crc kubenswrapper[4691]: I1124 08:00:40.676782 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 24 08:00:41 crc kubenswrapper[4691]: I1124 08:00:41.501263 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" Nov 24 08:00:45 crc kubenswrapper[4691]: E1124 08:00:45.506755 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-2t866" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" Nov 24 08:00:45 crc kubenswrapper[4691]: E1124 08:00:45.506766 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-pnnmn" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" Nov 24 08:00:46 crc kubenswrapper[4691]: E1124 08:00:46.356656 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 24 08:00:46 crc kubenswrapper[4691]: E1124 08:00:46.357562 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q9grw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-2twgj_openshift-marketplace(d2f8a9dc-663e-4288-aa02-9e553a7b18a9): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 08:00:46 crc kubenswrapper[4691]: E1124 08:00:46.359033 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-2twgj" podUID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" Nov 24 08:00:46 crc kubenswrapper[4691]: E1124 08:00:46.422408 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 24 08:00:46 crc kubenswrapper[4691]: E1124 08:00:46.422615 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5ph76,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-xk274_openshift-marketplace(a3e985ce-848b-49fc-99aa-8f9fc08820be): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 24 08:00:46 crc kubenswrapper[4691]: E1124 08:00:46.423824 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-xk274" podUID="a3e985ce-848b-49fc-99aa-8f9fc08820be" Nov 24 08:00:46 crc kubenswrapper[4691]: I1124 08:00:46.990652 4691 generic.go:334] "Generic (PLEG): container finished" podID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" containerID="591887209b409551283399c4fbc89f9c1718b56f363f28a9d718e7be0bed73d5" exitCode=0 Nov 24 08:00:46 crc kubenswrapper[4691]: I1124 08:00:46.991051 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mszmq" event={"ID":"7cd069de-809b-4bdf-8dad-51f00b26dcb0","Type":"ContainerDied","Data":"591887209b409551283399c4fbc89f9c1718b56f363f28a9d718e7be0bed73d5"} Nov 24 08:00:46 crc kubenswrapper[4691]: I1124 08:00:46.997040 4691 generic.go:334] "Generic (PLEG): container finished" podID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" containerID="9ec1c7e8cb1e60b48c17bb0941fbe1535d397249f2ca05dd1af653a169aa8612" exitCode=0 Nov 24 08:00:46 crc kubenswrapper[4691]: I1124 08:00:46.997137 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5r6nd" event={"ID":"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e","Type":"ContainerDied","Data":"9ec1c7e8cb1e60b48c17bb0941fbe1535d397249f2ca05dd1af653a169aa8612"} Nov 24 08:00:47 crc kubenswrapper[4691]: I1124 08:00:47.001913 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7cng4" event={"ID":"1b8890e7-15c8-4467-b31b-493b565c584a","Type":"ContainerStarted","Data":"8bb6c0802059aba4e5b7d2b817278371127d0c46b07748b9655ca48ee071802a"} Nov 24 08:00:47 crc kubenswrapper[4691]: I1124 08:00:47.002481 4691 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7cng4" Nov 24 08:00:47 crc kubenswrapper[4691]: I1124 08:00:47.005583 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 24 08:00:47 crc kubenswrapper[4691]: E1124 08:00:47.005662 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-xk274" podUID="a3e985ce-848b-49fc-99aa-8f9fc08820be" Nov 24 08:00:47 crc kubenswrapper[4691]: I1124 08:00:47.005873 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 24 08:00:47 crc kubenswrapper[4691]: E1124 08:00:47.009720 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-2twgj" podUID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" Nov 24 08:00:48 crc kubenswrapper[4691]: I1124 08:00:48.012614 4691 generic.go:334] "Generic (PLEG): container finished" podID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerID="ae5bf999410071387a25a9446bbf82bd60c37675b9f947813823e99508c6e406" exitCode=0 Nov 24 08:00:48 crc kubenswrapper[4691]: I1124 08:00:48.012727 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cbnjg" event={"ID":"5c3c36fe-8ff0-4639-ae9c-e69785ea4611","Type":"ContainerDied","Data":"ae5bf999410071387a25a9446bbf82bd60c37675b9f947813823e99508c6e406"} Nov 24 08:00:48 crc kubenswrapper[4691]: I1124 08:00:48.018022 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mszmq" event={"ID":"7cd069de-809b-4bdf-8dad-51f00b26dcb0","Type":"ContainerStarted","Data":"a70093d839a559d9b0a6a29dd4d64f992aec8e4a22a025f4dc092faa3bb2d3f3"} Nov 24 08:00:48 crc kubenswrapper[4691]: I1124 08:00:48.024703 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5r6nd" event={"ID":"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e","Type":"ContainerStarted","Data":"ce04c23a0bfb4dd3ca78ba9c16fc588942c0ac0c32675e14b8ac96ad61f32c10"} Nov 24 08:00:48 crc kubenswrapper[4691]: I1124 08:00:48.027141 4691 generic.go:334] "Generic (PLEG): container finished" podID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerID="b6573f47434df7800f13ebd1439ef35f775104b38bac1728f5ebd0821c864c63" exitCode=0 Nov 24 08:00:48 crc kubenswrapper[4691]: I1124 08:00:48.028010 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bx62n" event={"ID":"d3ebf582-98e0-4899-885b-22a4289b2b4d","Type":"ContainerDied","Data":"b6573f47434df7800f13ebd1439ef35f775104b38bac1728f5ebd0821c864c63"} Nov 24 08:00:48 crc kubenswrapper[4691]: I1124 08:00:48.028100 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Readiness 
probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 24 08:00:48 crc kubenswrapper[4691]: I1124 08:00:48.028145 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 24 08:00:48 crc kubenswrapper[4691]: I1124 08:00:48.059931 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5r6nd" podStartSLOduration=3.300429433 podStartE2EDuration="39.059907204s" podCreationTimestamp="2025-11-24 08:00:09 +0000 UTC" firstStartedPulling="2025-11-24 08:00:11.648670737 +0000 UTC m=+173.647619986" lastFinishedPulling="2025-11-24 08:00:47.408148508 +0000 UTC m=+209.407097757" observedRunningTime="2025-11-24 08:00:48.056302664 +0000 UTC m=+210.055251934" watchObservedRunningTime="2025-11-24 08:00:48.059907204 +0000 UTC m=+210.058856453" Nov 24 08:00:48 crc kubenswrapper[4691]: I1124 08:00:48.078172 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mszmq" podStartSLOduration=2.28366882 podStartE2EDuration="38.07815426s" podCreationTimestamp="2025-11-24 08:00:10 +0000 UTC" firstStartedPulling="2025-11-24 08:00:11.680330818 +0000 UTC m=+173.679280077" lastFinishedPulling="2025-11-24 08:00:47.474816268 +0000 UTC m=+209.473765517" observedRunningTime="2025-11-24 08:00:48.077485499 +0000 UTC m=+210.076434768" watchObservedRunningTime="2025-11-24 08:00:48.07815426 +0000 UTC m=+210.077103509" Nov 24 08:00:49 crc kubenswrapper[4691]: I1124 08:00:49.035573 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bx62n" event={"ID":"d3ebf582-98e0-4899-885b-22a4289b2b4d","Type":"ContainerStarted","Data":"fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156"} Nov 24 08:00:49 crc kubenswrapper[4691]: I1124 08:00:49.038635 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cbnjg" event={"ID":"5c3c36fe-8ff0-4639-ae9c-e69785ea4611","Type":"ContainerStarted","Data":"645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a"} Nov 24 08:00:49 crc kubenswrapper[4691]: I1124 08:00:49.077610 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bx62n" podStartSLOduration=3.1569380320000002 podStartE2EDuration="39.077582551s" podCreationTimestamp="2025-11-24 08:00:10 +0000 UTC" firstStartedPulling="2025-11-24 08:00:12.689265642 +0000 UTC m=+174.688214911" lastFinishedPulling="2025-11-24 08:00:48.609910181 +0000 UTC m=+210.608859430" observedRunningTime="2025-11-24 08:00:49.05587224 +0000 UTC m=+211.054821499" watchObservedRunningTime="2025-11-24 08:00:49.077582551 +0000 UTC m=+211.076531800" Nov 24 08:00:49 crc kubenswrapper[4691]: I1124 08:00:49.079229 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cbnjg" podStartSLOduration=3.387243658 podStartE2EDuration="37.079220741s" podCreationTimestamp="2025-11-24 08:00:12 +0000 UTC" firstStartedPulling="2025-11-24 08:00:14.746559419 +0000 UTC m=+176.745508668" lastFinishedPulling="2025-11-24 08:00:48.438536502 +0000 UTC m=+210.437485751" observedRunningTime="2025-11-24 
08:00:49.07656005 +0000 UTC m=+211.075509309" watchObservedRunningTime="2025-11-24 08:00:49.079220741 +0000 UTC m=+211.078169990" Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.063692 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.063775 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.475506 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.478622 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.478671 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.526689 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.676739 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.676831 4691 patch_prober.go:28] interesting pod/downloads-7954f5f757-7cng4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.676833 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.676928 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7cng4" podUID="1b8890e7-15c8-4467-b31b-493b565c584a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.682248 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:50 crc kubenswrapper[4691]: I1124 08:00:50.682290 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:00:51 crc kubenswrapper[4691]: I1124 08:00:51.089271 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:00:51 crc kubenswrapper[4691]: I1124 08:00:51.089354 4691 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:00:51 crc kubenswrapper[4691]: I1124 08:00:51.089416 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:00:51 crc kubenswrapper[4691]: I1124 08:00:51.090242 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 08:00:51 crc kubenswrapper[4691]: I1124 08:00:51.090299 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb" gracePeriod=600 Nov 24 08:00:51 crc kubenswrapper[4691]: I1124 08:00:51.736140 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-bx62n" podUID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerName="registry-server" probeResult="failure" output=< Nov 24 08:00:51 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 08:00:51 crc kubenswrapper[4691]: > Nov 24 08:00:52 crc kubenswrapper[4691]: I1124 08:00:52.058190 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb" exitCode=0 Nov 24 08:00:52 crc kubenswrapper[4691]: I1124 08:00:52.058257 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb"} Nov 24 08:00:53 crc kubenswrapper[4691]: I1124 08:00:53.067843 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"2696471643d2e0fe14b54a335aee3091d21a0ad84005def235cb124eca7c95b3"} Nov 24 08:00:53 crc kubenswrapper[4691]: I1124 08:00:53.395203 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cbnjg" Nov 24 08:00:53 crc kubenswrapper[4691]: I1124 08:00:53.395786 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cbnjg" Nov 24 08:00:54 crc kubenswrapper[4691]: I1124 08:00:54.441192 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-cbnjg" podUID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerName="registry-server" probeResult="failure" output=< Nov 24 08:00:54 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 08:00:54 crc kubenswrapper[4691]: > Nov 24 08:00:58 crc kubenswrapper[4691]: I1124 08:00:58.098745 4691 generic.go:334] "Generic (PLEG): 
container finished" podID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerID="4b1de7ca78bf651c84161b3469016d9e641d0f852a5f1d1dd9dd3ec802054cff" exitCode=0 Nov 24 08:00:58 crc kubenswrapper[4691]: I1124 08:00:58.099301 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pnnmn" event={"ID":"9312bc5d-54a5-4172-9674-1afebef9cc98","Type":"ContainerDied","Data":"4b1de7ca78bf651c84161b3469016d9e641d0f852a5f1d1dd9dd3ec802054cff"} Nov 24 08:00:59 crc kubenswrapper[4691]: I1124 08:00:59.038591 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-h2rcj"] Nov 24 08:00:59 crc kubenswrapper[4691]: I1124 08:00:59.105837 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2t866" event={"ID":"50e404aa-af0b-471a-9289-ab9bb5317ffc","Type":"ContainerStarted","Data":"9be25836b8018d4c8e0565f009c9e830bfafcc9bdb554921513027a48be8b025"} Nov 24 08:00:59 crc kubenswrapper[4691]: I1124 08:00:59.108245 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pnnmn" event={"ID":"9312bc5d-54a5-4172-9674-1afebef9cc98","Type":"ContainerStarted","Data":"e84332a2ddcf939d9b273ac6567be27e1871d0d05b00708654ad2f8f49cd7ae6"} Nov 24 08:00:59 crc kubenswrapper[4691]: I1124 08:00:59.170060 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pnnmn" podStartSLOduration=3.316268844 podStartE2EDuration="50.170035791s" podCreationTimestamp="2025-11-24 08:00:09 +0000 UTC" firstStartedPulling="2025-11-24 08:00:11.634461054 +0000 UTC m=+173.633410303" lastFinishedPulling="2025-11-24 08:00:58.488228001 +0000 UTC m=+220.487177250" observedRunningTime="2025-11-24 08:00:59.163805421 +0000 UTC m=+221.162754670" watchObservedRunningTime="2025-11-24 08:00:59.170035791 +0000 UTC m=+221.168985040" Nov 24 08:01:00 crc kubenswrapper[4691]: I1124 08:01:00.112272 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:01:00 crc kubenswrapper[4691]: I1124 08:01:00.114738 4691 generic.go:334] "Generic (PLEG): container finished" podID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerID="9be25836b8018d4c8e0565f009c9e830bfafcc9bdb554921513027a48be8b025" exitCode=0 Nov 24 08:01:00 crc kubenswrapper[4691]: I1124 08:01:00.114791 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2t866" event={"ID":"50e404aa-af0b-471a-9289-ab9bb5317ffc","Type":"ContainerDied","Data":"9be25836b8018d4c8e0565f009c9e830bfafcc9bdb554921513027a48be8b025"} Nov 24 08:01:00 crc kubenswrapper[4691]: I1124 08:01:00.295945 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:01:00 crc kubenswrapper[4691]: I1124 08:01:00.295995 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:01:00 crc kubenswrapper[4691]: I1124 08:01:00.531108 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mszmq" Nov 24 08:01:00 crc kubenswrapper[4691]: I1124 08:01:00.685203 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-7cng4" Nov 24 08:01:00 crc kubenswrapper[4691]: I1124 08:01:00.840111 4691 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:01:00 crc kubenswrapper[4691]: I1124 08:01:00.909615 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:01:01 crc kubenswrapper[4691]: I1124 08:01:01.353075 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-pnnmn" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerName="registry-server" probeResult="failure" output=< Nov 24 08:01:01 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 08:01:01 crc kubenswrapper[4691]: > Nov 24 08:01:01 crc kubenswrapper[4691]: I1124 08:01:01.388998 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bx62n"] Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.127978 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2t866" event={"ID":"50e404aa-af0b-471a-9289-ab9bb5317ffc","Type":"ContainerStarted","Data":"1da44a96efd55d3c0b8a902e184022f8dd8e3d5b611502eb2c86cee8206c93a6"} Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.129896 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bx62n" podUID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerName="registry-server" containerID="cri-o://fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156" gracePeriod=2 Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.129984 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2twgj" event={"ID":"d2f8a9dc-663e-4288-aa02-9e553a7b18a9","Type":"ContainerStarted","Data":"c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b"} Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.149068 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2t866" podStartSLOduration=2.996868413 podStartE2EDuration="51.14904562s" podCreationTimestamp="2025-11-24 08:00:11 +0000 UTC" firstStartedPulling="2025-11-24 08:00:13.726191011 +0000 UTC m=+175.725140260" lastFinishedPulling="2025-11-24 08:01:01.878368218 +0000 UTC m=+223.877317467" observedRunningTime="2025-11-24 08:01:02.146501763 +0000 UTC m=+224.145451022" watchObservedRunningTime="2025-11-24 08:01:02.14904562 +0000 UTC m=+224.147994869" Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.249230 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.249470 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.523652 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.698510 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-utilities\") pod \"d3ebf582-98e0-4899-885b-22a4289b2b4d\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.698634 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wcvc7\" (UniqueName: \"kubernetes.io/projected/d3ebf582-98e0-4899-885b-22a4289b2b4d-kube-api-access-wcvc7\") pod \"d3ebf582-98e0-4899-885b-22a4289b2b4d\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.698683 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-catalog-content\") pod \"d3ebf582-98e0-4899-885b-22a4289b2b4d\" (UID: \"d3ebf582-98e0-4899-885b-22a4289b2b4d\") " Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.699565 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-utilities" (OuterVolumeSpecName: "utilities") pod "d3ebf582-98e0-4899-885b-22a4289b2b4d" (UID: "d3ebf582-98e0-4899-885b-22a4289b2b4d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.704926 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3ebf582-98e0-4899-885b-22a4289b2b4d-kube-api-access-wcvc7" (OuterVolumeSpecName: "kube-api-access-wcvc7") pod "d3ebf582-98e0-4899-885b-22a4289b2b4d" (UID: "d3ebf582-98e0-4899-885b-22a4289b2b4d"). InnerVolumeSpecName "kube-api-access-wcvc7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.776336 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3ebf582-98e0-4899-885b-22a4289b2b4d" (UID: "d3ebf582-98e0-4899-885b-22a4289b2b4d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.792572 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mszmq"] Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.792870 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mszmq" podUID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" containerName="registry-server" containerID="cri-o://a70093d839a559d9b0a6a29dd4d64f992aec8e4a22a025f4dc092faa3bb2d3f3" gracePeriod=2 Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.801113 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.801651 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wcvc7\" (UniqueName: \"kubernetes.io/projected/d3ebf582-98e0-4899-885b-22a4289b2b4d-kube-api-access-wcvc7\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:02 crc kubenswrapper[4691]: I1124 08:01:02.801669 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3ebf582-98e0-4899-885b-22a4289b2b4d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.137893 4691 generic.go:334] "Generic (PLEG): container finished" podID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" containerID="c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b" exitCode=0 Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.137982 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2twgj" event={"ID":"d2f8a9dc-663e-4288-aa02-9e553a7b18a9","Type":"ContainerDied","Data":"c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b"} Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.142907 4691 generic.go:334] "Generic (PLEG): container finished" podID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerID="fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156" exitCode=0 Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.143012 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bx62n" event={"ID":"d3ebf582-98e0-4899-885b-22a4289b2b4d","Type":"ContainerDied","Data":"fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156"} Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.143047 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bx62n" event={"ID":"d3ebf582-98e0-4899-885b-22a4289b2b4d","Type":"ContainerDied","Data":"f1a29cdefc91e0335d00ac1d2e823da9b5f4a33b1f6bb9daf0813b6f318329ad"} Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.143047 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bx62n" Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.143070 4691 scope.go:117] "RemoveContainer" containerID="fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156" Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.148814 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xk274" event={"ID":"a3e985ce-848b-49fc-99aa-8f9fc08820be","Type":"ContainerStarted","Data":"3c4b3d199c90125278d76be392a2460aec3c1b54266f5fb1800907596940628a"} Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.153234 4691 generic.go:334] "Generic (PLEG): container finished" podID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" containerID="a70093d839a559d9b0a6a29dd4d64f992aec8e4a22a025f4dc092faa3bb2d3f3" exitCode=0 Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.153426 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mszmq" event={"ID":"7cd069de-809b-4bdf-8dad-51f00b26dcb0","Type":"ContainerDied","Data":"a70093d839a559d9b0a6a29dd4d64f992aec8e4a22a025f4dc092faa3bb2d3f3"} Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.162461 4691 scope.go:117] "RemoveContainer" containerID="b6573f47434df7800f13ebd1439ef35f775104b38bac1728f5ebd0821c864c63" Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.279601 4691 scope.go:117] "RemoveContainer" containerID="118c467bfe209c3ffaf7438f15173d4514116afa5bdd3f0e84c4aa486c3dec7e" Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.295757 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-2t866" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerName="registry-server" probeResult="failure" output=< Nov 24 08:01:03 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 08:01:03 crc kubenswrapper[4691]: > Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.298504 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mszmq"
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.304570 4691 scope.go:117] "RemoveContainer" containerID="fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156"
Nov 24 08:01:03 crc kubenswrapper[4691]: E1124 08:01:03.305097 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156\": container with ID starting with fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156 not found: ID does not exist" containerID="fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156"
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.305202 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156"} err="failed to get container status \"fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156\": rpc error: code = NotFound desc = could not find container \"fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156\": container with ID starting with fb682ccb276dec4292aba14c1d7c775df04bb0bb1b7a372971af48bd34063156 not found: ID does not exist"
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.305292 4691 scope.go:117] "RemoveContainer" containerID="b6573f47434df7800f13ebd1439ef35f775104b38bac1728f5ebd0821c864c63"
Nov 24 08:01:03 crc kubenswrapper[4691]: E1124 08:01:03.305540 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6573f47434df7800f13ebd1439ef35f775104b38bac1728f5ebd0821c864c63\": container with ID starting with b6573f47434df7800f13ebd1439ef35f775104b38bac1728f5ebd0821c864c63 not found: ID does not exist" containerID="b6573f47434df7800f13ebd1439ef35f775104b38bac1728f5ebd0821c864c63"
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.305564 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6573f47434df7800f13ebd1439ef35f775104b38bac1728f5ebd0821c864c63"} err="failed to get container status \"b6573f47434df7800f13ebd1439ef35f775104b38bac1728f5ebd0821c864c63\": rpc error: code = NotFound desc = could not find container \"b6573f47434df7800f13ebd1439ef35f775104b38bac1728f5ebd0821c864c63\": container with ID starting with b6573f47434df7800f13ebd1439ef35f775104b38bac1728f5ebd0821c864c63 not found: ID does not exist"
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.305577 4691 scope.go:117] "RemoveContainer" containerID="118c467bfe209c3ffaf7438f15173d4514116afa5bdd3f0e84c4aa486c3dec7e"
Nov 24 08:01:03 crc kubenswrapper[4691]: E1124 08:01:03.305774 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"118c467bfe209c3ffaf7438f15173d4514116afa5bdd3f0e84c4aa486c3dec7e\": container with ID starting with 118c467bfe209c3ffaf7438f15173d4514116afa5bdd3f0e84c4aa486c3dec7e not found: ID does not exist" containerID="118c467bfe209c3ffaf7438f15173d4514116afa5bdd3f0e84c4aa486c3dec7e"
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.305794 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"118c467bfe209c3ffaf7438f15173d4514116afa5bdd3f0e84c4aa486c3dec7e"} err="failed to get container status \"118c467bfe209c3ffaf7438f15173d4514116afa5bdd3f0e84c4aa486c3dec7e\": rpc error: code = NotFound desc = could not find container \"118c467bfe209c3ffaf7438f15173d4514116afa5bdd3f0e84c4aa486c3dec7e\": container with ID starting with 118c467bfe209c3ffaf7438f15173d4514116afa5bdd3f0e84c4aa486c3dec7e not found: ID does not exist"
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.319337 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bx62n"]
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.330602 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bx62n"]
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.421826 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlpmq\" (UniqueName: \"kubernetes.io/projected/7cd069de-809b-4bdf-8dad-51f00b26dcb0-kube-api-access-xlpmq\") pod \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") "
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.421884 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-catalog-content\") pod \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") "
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.421978 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-utilities\") pod \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\" (UID: \"7cd069de-809b-4bdf-8dad-51f00b26dcb0\") "
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.422763 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-utilities" (OuterVolumeSpecName: "utilities") pod "7cd069de-809b-4bdf-8dad-51f00b26dcb0" (UID: "7cd069de-809b-4bdf-8dad-51f00b26dcb0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.428048 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cd069de-809b-4bdf-8dad-51f00b26dcb0-kube-api-access-xlpmq" (OuterVolumeSpecName: "kube-api-access-xlpmq") pod "7cd069de-809b-4bdf-8dad-51f00b26dcb0" (UID: "7cd069de-809b-4bdf-8dad-51f00b26dcb0"). InnerVolumeSpecName "kube-api-access-xlpmq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.433388 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.475640 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cbnjg"
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.477157 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7cd069de-809b-4bdf-8dad-51f00b26dcb0" (UID: "7cd069de-809b-4bdf-8dad-51f00b26dcb0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.523623 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlpmq\" (UniqueName: \"kubernetes.io/projected/7cd069de-809b-4bdf-8dad-51f00b26dcb0-kube-api-access-xlpmq\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.523675 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:03 crc kubenswrapper[4691]: I1124 08:01:03.523685 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cd069de-809b-4bdf-8dad-51f00b26dcb0-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.163123 4691 generic.go:334] "Generic (PLEG): container finished" podID="a3e985ce-848b-49fc-99aa-8f9fc08820be" containerID="3c4b3d199c90125278d76be392a2460aec3c1b54266f5fb1800907596940628a" exitCode=0
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.164708 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xk274" event={"ID":"a3e985ce-848b-49fc-99aa-8f9fc08820be","Type":"ContainerDied","Data":"3c4b3d199c90125278d76be392a2460aec3c1b54266f5fb1800907596940628a"}
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.168083 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mszmq" event={"ID":"7cd069de-809b-4bdf-8dad-51f00b26dcb0","Type":"ContainerDied","Data":"b61c83c955cebff1d65286977cf223aa1b707ffb229bfe422a0ff1953f6368f3"}
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.168156 4691 scope.go:117] "RemoveContainer" containerID="a70093d839a559d9b0a6a29dd4d64f992aec8e4a22a025f4dc092faa3bb2d3f3"
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.168375 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mszmq"
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.171760 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2twgj" event={"ID":"d2f8a9dc-663e-4288-aa02-9e553a7b18a9","Type":"ContainerStarted","Data":"e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248"}
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.183670 4691 scope.go:117] "RemoveContainer" containerID="591887209b409551283399c4fbc89f9c1718b56f363f28a9d718e7be0bed73d5"
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.211385 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2twgj" podStartSLOduration=2.056098934 podStartE2EDuration="51.211331985s" podCreationTimestamp="2025-11-24 08:00:13 +0000 UTC" firstStartedPulling="2025-11-24 08:00:14.74660839 +0000 UTC m=+176.745557639" lastFinishedPulling="2025-11-24 08:01:03.901841441 +0000 UTC m=+225.900790690" observedRunningTime="2025-11-24 08:01:04.204366483 +0000 UTC m=+226.203315732" watchObservedRunningTime="2025-11-24 08:01:04.211331985 +0000 UTC m=+226.210281234"
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.222966 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mszmq"]
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.222990 4691 scope.go:117] "RemoveContainer" containerID="6594b5b821388ecf9dd7bcdde7c6161d12feee7bc142f5b8ab8a965c72813a37"
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.243201 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mszmq"]
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.768745 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" path="/var/lib/kubelet/pods/7cd069de-809b-4bdf-8dad-51f00b26dcb0/volumes"
Nov 24 08:01:04 crc kubenswrapper[4691]: I1124 08:01:04.770119 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3ebf582-98e0-4899-885b-22a4289b2b4d" path="/var/lib/kubelet/pods/d3ebf582-98e0-4899-885b-22a4289b2b4d/volumes"
Nov 24 08:01:05 crc kubenswrapper[4691]: I1124 08:01:05.180150 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xk274" event={"ID":"a3e985ce-848b-49fc-99aa-8f9fc08820be","Type":"ContainerStarted","Data":"bf85655ea6eaa32c181ffca19fd7477b99c5178f0e74343b7847ce0630b2f284"}
Nov 24 08:01:05 crc kubenswrapper[4691]: I1124 08:01:05.211426 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xk274" podStartSLOduration=3.342107794 podStartE2EDuration="53.211403507s" podCreationTimestamp="2025-11-24 08:00:12 +0000 UTC" firstStartedPulling="2025-11-24 08:00:14.746117776 +0000 UTC m=+176.745067035" lastFinishedPulling="2025-11-24 08:01:04.615413499 +0000 UTC m=+226.614362748" observedRunningTime="2025-11-24 08:01:05.2078918 +0000 UTC m=+227.206841049" watchObservedRunningTime="2025-11-24 08:01:05.211403507 +0000 UTC m=+227.210352756"
Nov 24 08:01:10 crc kubenswrapper[4691]: I1124 08:01:10.341065 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pnnmn"
Nov 24 08:01:10 crc kubenswrapper[4691]: I1124 08:01:10.393517 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pnnmn"
Nov 24 08:01:12 crc kubenswrapper[4691]: I1124 08:01:12.295962 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2t866"
Nov 24 08:01:12 crc kubenswrapper[4691]: I1124 08:01:12.345174 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2t866"
Nov 24 08:01:12 crc kubenswrapper[4691]: I1124 08:01:12.687479 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:01:12 crc kubenswrapper[4691]: I1124 08:01:12.687625 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:01:12 crc kubenswrapper[4691]: I1124 08:01:12.727174 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:01:13 crc kubenswrapper[4691]: I1124 08:01:13.268646 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:01:13 crc kubenswrapper[4691]: I1124 08:01:13.691217 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:01:13 crc kubenswrapper[4691]: I1124 08:01:13.691429 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:01:13 crc kubenswrapper[4691]: I1124 08:01:13.730613 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:01:14 crc kubenswrapper[4691]: I1124 08:01:14.286927 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:01:15 crc kubenswrapper[4691]: I1124 08:01:15.392888 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xk274"]
Nov 24 08:01:15 crc kubenswrapper[4691]: I1124 08:01:15.393146 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xk274" podUID="a3e985ce-848b-49fc-99aa-8f9fc08820be" containerName="registry-server" containerID="cri-o://bf85655ea6eaa32c181ffca19fd7477b99c5178f0e74343b7847ce0630b2f284" gracePeriod=2
Nov 24 08:01:15 crc kubenswrapper[4691]: I1124 08:01:15.592105 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2twgj"]
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.242012 4691 generic.go:334] "Generic (PLEG): container finished" podID="a3e985ce-848b-49fc-99aa-8f9fc08820be" containerID="bf85655ea6eaa32c181ffca19fd7477b99c5178f0e74343b7847ce0630b2f284" exitCode=0
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.242072 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xk274" event={"ID":"a3e985ce-848b-49fc-99aa-8f9fc08820be","Type":"ContainerDied","Data":"bf85655ea6eaa32c181ffca19fd7477b99c5178f0e74343b7847ce0630b2f284"}
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.475612 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.611192 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ph76\" (UniqueName: \"kubernetes.io/projected/a3e985ce-848b-49fc-99aa-8f9fc08820be-kube-api-access-5ph76\") pod \"a3e985ce-848b-49fc-99aa-8f9fc08820be\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") "
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.611359 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-catalog-content\") pod \"a3e985ce-848b-49fc-99aa-8f9fc08820be\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") "
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.611417 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-utilities\") pod \"a3e985ce-848b-49fc-99aa-8f9fc08820be\" (UID: \"a3e985ce-848b-49fc-99aa-8f9fc08820be\") "
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.612590 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-utilities" (OuterVolumeSpecName: "utilities") pod "a3e985ce-848b-49fc-99aa-8f9fc08820be" (UID: "a3e985ce-848b-49fc-99aa-8f9fc08820be"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.619729 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3e985ce-848b-49fc-99aa-8f9fc08820be-kube-api-access-5ph76" (OuterVolumeSpecName: "kube-api-access-5ph76") pod "a3e985ce-848b-49fc-99aa-8f9fc08820be" (UID: "a3e985ce-848b-49fc-99aa-8f9fc08820be"). InnerVolumeSpecName "kube-api-access-5ph76". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.630347 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a3e985ce-848b-49fc-99aa-8f9fc08820be" (UID: "a3e985ce-848b-49fc-99aa-8f9fc08820be"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.715756 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.715921 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3e985ce-848b-49fc-99aa-8f9fc08820be-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:16 crc kubenswrapper[4691]: I1124 08:01:16.715944 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ph76\" (UniqueName: \"kubernetes.io/projected/a3e985ce-848b-49fc-99aa-8f9fc08820be-kube-api-access-5ph76\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.251156 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2twgj" podUID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" containerName="registry-server" containerID="cri-o://e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248" gracePeriod=2
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.251895 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xk274"
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.252303 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xk274" event={"ID":"a3e985ce-848b-49fc-99aa-8f9fc08820be","Type":"ContainerDied","Data":"eaa6a44815dc5fce735abe3b292d3266427d4748a79a17cd89148c5316b1c2e6"}
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.252344 4691 scope.go:117] "RemoveContainer" containerID="bf85655ea6eaa32c181ffca19fd7477b99c5178f0e74343b7847ce0630b2f284"
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.276646 4691 scope.go:117] "RemoveContainer" containerID="3c4b3d199c90125278d76be392a2460aec3c1b54266f5fb1800907596940628a"
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.290764 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xk274"]
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.294658 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xk274"]
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.305072 4691 scope.go:117] "RemoveContainer" containerID="8fdad7ad68e761db5244441649abdd1a2761268f94e9041b6cb07d219b632882"
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.637363 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.730596 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-catalog-content\") pod \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") "
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.730683 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-utilities\") pod \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") "
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.730772 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9grw\" (UniqueName: \"kubernetes.io/projected/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-kube-api-access-q9grw\") pod \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\" (UID: \"d2f8a9dc-663e-4288-aa02-9e553a7b18a9\") "
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.731657 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-utilities" (OuterVolumeSpecName: "utilities") pod "d2f8a9dc-663e-4288-aa02-9e553a7b18a9" (UID: "d2f8a9dc-663e-4288-aa02-9e553a7b18a9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.736864 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-kube-api-access-q9grw" (OuterVolumeSpecName: "kube-api-access-q9grw") pod "d2f8a9dc-663e-4288-aa02-9e553a7b18a9" (UID: "d2f8a9dc-663e-4288-aa02-9e553a7b18a9"). InnerVolumeSpecName "kube-api-access-q9grw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.829433 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d2f8a9dc-663e-4288-aa02-9e553a7b18a9" (UID: "d2f8a9dc-663e-4288-aa02-9e553a7b18a9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.833082 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.833125 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:17 crc kubenswrapper[4691]: I1124 08:01:17.833137 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9grw\" (UniqueName: \"kubernetes.io/projected/d2f8a9dc-663e-4288-aa02-9e553a7b18a9-kube-api-access-q9grw\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.259801 4691 generic.go:334] "Generic (PLEG): container finished" podID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" containerID="e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248" exitCode=0
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.259904 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2twgj"
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.259932 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2twgj" event={"ID":"d2f8a9dc-663e-4288-aa02-9e553a7b18a9","Type":"ContainerDied","Data":"e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248"}
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.260077 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2twgj" event={"ID":"d2f8a9dc-663e-4288-aa02-9e553a7b18a9","Type":"ContainerDied","Data":"c3a0f29939f046dcc1c879b7d3651058d457e713ea36ead672327514c4a9bef1"}
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.260110 4691 scope.go:117] "RemoveContainer" containerID="e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248"
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.284401 4691 scope.go:117] "RemoveContainer" containerID="c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b"
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.295876 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2twgj"]
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.299766 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2twgj"]
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.318241 4691 scope.go:117] "RemoveContainer" containerID="18d64c0b0992a8e53fd7c91e4483ffb38a89a86e30966f6991ddce31fa5d9f6d"
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.336361 4691 scope.go:117] "RemoveContainer" containerID="e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248"
Nov 24 08:01:18 crc kubenswrapper[4691]: E1124 08:01:18.337058 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248\": container with ID starting with e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248 not found: ID does not exist" containerID="e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248"
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.337111 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248"} err="failed to get container status \"e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248\": rpc error: code = NotFound desc = could not find container \"e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248\": container with ID starting with e72a7a4db7eebc4f58dc952fd5dd73c67693e48926fceb5ac05daa55ecd4f248 not found: ID does not exist"
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.337135 4691 scope.go:117] "RemoveContainer" containerID="c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b"
Nov 24 08:01:18 crc kubenswrapper[4691]: E1124 08:01:18.337578 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b\": container with ID starting with c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b not found: ID does not exist" containerID="c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b"
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.337614 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b"} err="failed to get container status \"c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b\": rpc error: code = NotFound desc = could not find container \"c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b\": container with ID starting with c3bef42c2a00d2af70dd4004423feb5a8e199affcdf0e961beb0a6aff7e9e76b not found: ID does not exist"
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.337637 4691 scope.go:117] "RemoveContainer" containerID="18d64c0b0992a8e53fd7c91e4483ffb38a89a86e30966f6991ddce31fa5d9f6d"
Nov 24 08:01:18 crc kubenswrapper[4691]: E1124 08:01:18.338033 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18d64c0b0992a8e53fd7c91e4483ffb38a89a86e30966f6991ddce31fa5d9f6d\": container with ID starting with 18d64c0b0992a8e53fd7c91e4483ffb38a89a86e30966f6991ddce31fa5d9f6d not found: ID does not exist" containerID="18d64c0b0992a8e53fd7c91e4483ffb38a89a86e30966f6991ddce31fa5d9f6d"
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.338058 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18d64c0b0992a8e53fd7c91e4483ffb38a89a86e30966f6991ddce31fa5d9f6d"} err="failed to get container status \"18d64c0b0992a8e53fd7c91e4483ffb38a89a86e30966f6991ddce31fa5d9f6d\": rpc error: code = NotFound desc = could not find container \"18d64c0b0992a8e53fd7c91e4483ffb38a89a86e30966f6991ddce31fa5d9f6d\": container with ID starting with 18d64c0b0992a8e53fd7c91e4483ffb38a89a86e30966f6991ddce31fa5d9f6d not found: ID does not exist"
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.768670 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3e985ce-848b-49fc-99aa-8f9fc08820be" path="/var/lib/kubelet/pods/a3e985ce-848b-49fc-99aa-8f9fc08820be/volumes"
Nov 24 08:01:18 crc kubenswrapper[4691]: I1124 08:01:18.769877 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" path="/var/lib/kubelet/pods/d2f8a9dc-663e-4288-aa02-9e553a7b18a9/volumes"
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.061966 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" podUID="dd11e81f-c100-44a8-bc17-2ae2c0b2788d" containerName="oauth-openshift" containerID="cri-o://a46f7fc7ddfc558defdd6df8010bdc98d624b6717fafe34b6693a7eefbbc6552" gracePeriod=15
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.309824 4691 generic.go:334] "Generic (PLEG): container finished" podID="dd11e81f-c100-44a8-bc17-2ae2c0b2788d" containerID="a46f7fc7ddfc558defdd6df8010bdc98d624b6717fafe34b6693a7eefbbc6552" exitCode=0
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.309892 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" event={"ID":"dd11e81f-c100-44a8-bc17-2ae2c0b2788d","Type":"ContainerDied","Data":"a46f7fc7ddfc558defdd6df8010bdc98d624b6717fafe34b6693a7eefbbc6552"}
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.442182 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj"
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.629595 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-ocp-branding-template\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.629652 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-policies\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.629711 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-service-ca\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.629740 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-trusted-ca-bundle\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.629789 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-provider-selection\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.629818 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-serving-cert\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.629839 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-idp-0-file-data\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.629885 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-login\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.629914 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmtwh\" (UniqueName: \"kubernetes.io/projected/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-kube-api-access-vmtwh\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.629968 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-cliconfig\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.630000 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-dir\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.630025 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-router-certs\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.630042 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-session\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.630065 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-error\") pod \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\" (UID: \"dd11e81f-c100-44a8-bc17-2ae2c0b2788d\") "
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.630948 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.630967 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.631015 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.631063 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.631795 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.638640 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-kube-api-access-vmtwh" (OuterVolumeSpecName: "kube-api-access-vmtwh") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "kube-api-access-vmtwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.638662 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.639066 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.641652 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.642118 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.642280 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.642786 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.643220 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.643893 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "dd11e81f-c100-44a8-bc17-2ae2c0b2788d" (UID: "dd11e81f-c100-44a8-bc17-2ae2c0b2788d"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732547 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732614 4691 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-dir\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732637 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732661 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732680 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732700 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732718 4691 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732735 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732755 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732773 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732795 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732813 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732830 4691 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:24 crc kubenswrapper[4691]: I1124 08:01:24.732848 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmtwh\" (UniqueName: \"kubernetes.io/projected/dd11e81f-c100-44a8-bc17-2ae2c0b2788d-kube-api-access-vmtwh\") on node \"crc\" DevicePath \"\""
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056237 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-985c66b4-22c68"]
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056518 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3e985ce-848b-49fc-99aa-8f9fc08820be" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056532 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3e985ce-848b-49fc-99aa-8f9fc08820be" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056543 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056548 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056559 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056568 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056579 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3e985ce-848b-49fc-99aa-8f9fc08820be" containerName="extract-utilities"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056585 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3e985ce-848b-49fc-99aa-8f9fc08820be" containerName="extract-utilities"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056596 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd11e81f-c100-44a8-bc17-2ae2c0b2788d" containerName="oauth-openshift"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056601 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd11e81f-c100-44a8-bc17-2ae2c0b2788d" containerName="oauth-openshift"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056611 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerName="extract-content"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056617 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerName="extract-content"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056626 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" containerName="extract-content"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056632 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" containerName="extract-content"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056641 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerName="extract-utilities"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056650 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerName="extract-utilities"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056662 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" containerName="extract-utilities"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056667 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" containerName="extract-utilities"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056676 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="310b3b1c-76c2-4eca-afab-78c468d3b2a8" containerName="pruner"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056682 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="310b3b1c-76c2-4eca-afab-78c468d3b2a8" containerName="pruner"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056694 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" containerName="extract-utilities"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056700 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" containerName="extract-utilities"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056706 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" containerName="extract-content"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056711 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" containerName="extract-content"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056718 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056724 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056733 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fa49d03-3c6c-42c1-a0ce-d462695f1e2c" containerName="pruner"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056739 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fa49d03-3c6c-42c1-a0ce-d462695f1e2c" containerName="pruner"
Nov 24 08:01:25 crc kubenswrapper[4691]: E1124 08:01:25.056745 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3e985ce-848b-49fc-99aa-8f9fc08820be" containerName="extract-content"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056751 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3e985ce-848b-49fc-99aa-8f9fc08820be" containerName="extract-content"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056853 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2f8a9dc-663e-4288-aa02-9e553a7b18a9" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056863 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fa49d03-3c6c-42c1-a0ce-d462695f1e2c" containerName="pruner"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056871 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd11e81f-c100-44a8-bc17-2ae2c0b2788d" containerName="oauth-openshift"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056877 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cd069de-809b-4bdf-8dad-51f00b26dcb0" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056888 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3e985ce-848b-49fc-99aa-8f9fc08820be" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056898 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="310b3b1c-76c2-4eca-afab-78c468d3b2a8" containerName="pruner"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.056905 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3ebf582-98e0-4899-885b-22a4289b2b4d" containerName="registry-server"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.057315 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.078060 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-985c66b4-22c68"]
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.240039 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-serving-cert\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.240127 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-router-certs\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.240167 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.240430 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-session\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.240591 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-audit-policies\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.240793 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-service-ca\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.240866 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.241050 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c2e7770a-34e9-4417-bd2e-2b81047c1503-audit-dir\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.241131 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-cliconfig\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.241187 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-template-login\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.241274 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.241420 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-template-error\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.241500 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4sqr\" (UniqueName: \"kubernetes.io/projected/c2e7770a-34e9-4417-bd2e-2b81047c1503-kube-api-access-x4sqr\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.241536 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.320723 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj" event={"ID":"dd11e81f-c100-44a8-bc17-2ae2c0b2788d","Type":"ContainerDied","Data":"73139684ca432dff6e6e035ba845450047443e81ef687423d18bae077f80720f"}
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.320775 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-h2rcj"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.320817 4691 scope.go:117] "RemoveContainer" containerID="a46f7fc7ddfc558defdd6df8010bdc98d624b6717fafe34b6693a7eefbbc6552"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.339604 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-h2rcj"]
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.341882 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-h2rcj"]
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.343389 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-session\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.343434 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-audit-policies\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.343504 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-service-ca\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.343697 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345221 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-audit-policies\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345601 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c2e7770a-34e9-4417-bd2e-2b81047c1503-audit-dir\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345631 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-cliconfig\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345665 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-template-login\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345692 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345746 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-template-error\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345772 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4sqr\" (UniqueName: \"kubernetes.io/projected/c2e7770a-34e9-4417-bd2e-2b81047c1503-kube-api-access-x4sqr\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345794 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345823 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-serving-cert\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345816 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-service-ca\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345843 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-router-certs\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.345953 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.346792 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c2e7770a-34e9-4417-bd2e-2b81047c1503-audit-dir\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.347077 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.347330 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-cliconfig\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.349989 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-template-error\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68"
Nov 24 08:01:25 crc kubenswrapper[4691]: I1124
08:01:25.352007 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-serving-cert\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68" Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.352248 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68" Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.352958 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-template-login\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68" Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.353718 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-router-certs\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68" Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.355676 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68" Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.360166 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68" Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.360278 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/c2e7770a-34e9-4417-bd2e-2b81047c1503-v4-0-config-system-session\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68" Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.373693 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4sqr\" (UniqueName: \"kubernetes.io/projected/c2e7770a-34e9-4417-bd2e-2b81047c1503-kube-api-access-x4sqr\") pod \"oauth-openshift-985c66b4-22c68\" (UID: \"c2e7770a-34e9-4417-bd2e-2b81047c1503\") " pod="openshift-authentication/oauth-openshift-985c66b4-22c68" Nov 24 08:01:25 crc kubenswrapper[4691]: I1124 08:01:25.672917 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-985c66b4-22c68" Nov 24 08:01:26 crc kubenswrapper[4691]: I1124 08:01:26.154967 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-985c66b4-22c68"] Nov 24 08:01:26 crc kubenswrapper[4691]: I1124 08:01:26.327676 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-985c66b4-22c68" event={"ID":"c2e7770a-34e9-4417-bd2e-2b81047c1503","Type":"ContainerStarted","Data":"c887a9c91bb88a5d8477dedd192a4f2137f7ea344168db7780d3e15543230d00"} Nov 24 08:01:26 crc kubenswrapper[4691]: I1124 08:01:26.766741 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd11e81f-c100-44a8-bc17-2ae2c0b2788d" path="/var/lib/kubelet/pods/dd11e81f-c100-44a8-bc17-2ae2c0b2788d/volumes" Nov 24 08:01:27 crc kubenswrapper[4691]: I1124 08:01:27.337239 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-985c66b4-22c68" event={"ID":"c2e7770a-34e9-4417-bd2e-2b81047c1503","Type":"ContainerStarted","Data":"9edeac4eac29bac96081eeef3b97b207b847de35aa8e7c20eff02f1e916d65e2"} Nov 24 08:01:27 crc kubenswrapper[4691]: I1124 08:01:27.337692 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-985c66b4-22c68" Nov 24 08:01:27 crc kubenswrapper[4691]: I1124 08:01:27.349815 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-985c66b4-22c68" Nov 24 08:01:27 crc kubenswrapper[4691]: I1124 08:01:27.368759 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-985c66b4-22c68" podStartSLOduration=28.368534517 podStartE2EDuration="28.368534517s" podCreationTimestamp="2025-11-24 08:00:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:01:27.366037061 +0000 UTC m=+249.364986350" watchObservedRunningTime="2025-11-24 08:01:27.368534517 +0000 UTC m=+249.367483806" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.790511 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.792980 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.826424 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.892952 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod 
\"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.893098 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.893357 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.895791 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.895827 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.905496 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.906636 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.922809 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.922862 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 08:01:47 crc kubenswrapper[4691]: I1124 08:01:47.980833 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 08:01:48 crc kubenswrapper[4691]: I1124 08:01:48.176244 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 24 08:01:48 crc kubenswrapper[4691]: I1124 08:01:48.183358 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 24 08:01:48 crc kubenswrapper[4691]: I1124 08:01:48.484427 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"5b87b97308f368078088c8b4e5f20843429179d7c0486f18f2600439b2d3cead"} Nov 24 08:01:48 crc kubenswrapper[4691]: I1124 08:01:48.491602 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"eaed5978a1a516ec9ad4db00c1e84074ac143ba6b66316988a6ec69054d6142b"} Nov 24 08:01:48 crc kubenswrapper[4691]: W1124 08:01:48.497768 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-eb4e8f34eba02ad9ad4ace01160b46327f8c07f329fb138eee2c931511e71297 WatchSource:0}: Error finding container eb4e8f34eba02ad9ad4ace01160b46327f8c07f329fb138eee2c931511e71297: Status 404 returned error can't find the container with id eb4e8f34eba02ad9ad4ace01160b46327f8c07f329fb138eee2c931511e71297 Nov 24 08:01:49 crc kubenswrapper[4691]: I1124 08:01:49.497870 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"7ec675072743b629d60229eb2314f998b7817ab2a1ad16b95f4cfc82cc4afa8b"} Nov 24 08:01:49 crc kubenswrapper[4691]: I1124 08:01:49.499498 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 08:01:49 crc kubenswrapper[4691]: I1124 08:01:49.500893 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"fc3af1bf898c2ce8c74fa95d8022d4c7e4ab324d23d73e3f19587da519d91339"} Nov 24 08:01:49 crc kubenswrapper[4691]: I1124 08:01:49.502904 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"4e5d41f2e6567bac714957ce83e3d0a04d8793340151c24931b5d76723ceff32"} Nov 24 08:01:49 crc kubenswrapper[4691]: I1124 08:01:49.502966 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"eb4e8f34eba02ad9ad4ace01160b46327f8c07f329fb138eee2c931511e71297"} Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.153403 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5r6nd"] Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.154568 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5r6nd" podUID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" containerName="registry-server" containerID="cri-o://ce04c23a0bfb4dd3ca78ba9c16fc588942c0ac0c32675e14b8ac96ad61f32c10" gracePeriod=30 Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.166255 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/community-operators-pnnmn"] Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.166610 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pnnmn" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerName="registry-server" containerID="cri-o://e84332a2ddcf939d9b273ac6567be27e1871d0d05b00708654ad2f8f49cd7ae6" gracePeriod=30 Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.178552 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cbkxk"] Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.180141 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" podUID="28d1a6e7-60cf-4233-9298-4a561b105271" containerName="marketplace-operator" containerID="cri-o://3fad668274b2906cb8bc8f84b7d0b0ef3e5fc19797c064fd2e48ef1cdeec7bf4" gracePeriod=30 Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.187880 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2t866"] Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.188152 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2t866" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerName="registry-server" containerID="cri-o://1da44a96efd55d3c0b8a902e184022f8dd8e3d5b611502eb2c86cee8206c93a6" gracePeriod=30 Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.197658 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wgwxf"] Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.198892 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.205069 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cbnjg"] Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.222606 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cbnjg" podUID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerName="registry-server" containerID="cri-o://645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a" gracePeriod=30 Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.231130 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wgwxf"] Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.267948 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/74a9daa2-7bfc-487c-9990-9848391da95d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wgwxf\" (UID: \"74a9daa2-7bfc-487c-9990-9848391da95d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.268024 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56g84\" (UniqueName: \"kubernetes.io/projected/74a9daa2-7bfc-487c-9990-9848391da95d-kube-api-access-56g84\") pod \"marketplace-operator-79b997595-wgwxf\" (UID: \"74a9daa2-7bfc-487c-9990-9848391da95d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.268154 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/74a9daa2-7bfc-487c-9990-9848391da95d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wgwxf\" (UID: \"74a9daa2-7bfc-487c-9990-9848391da95d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.369389 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/74a9daa2-7bfc-487c-9990-9848391da95d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wgwxf\" (UID: \"74a9daa2-7bfc-487c-9990-9848391da95d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.369537 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56g84\" (UniqueName: \"kubernetes.io/projected/74a9daa2-7bfc-487c-9990-9848391da95d-kube-api-access-56g84\") pod \"marketplace-operator-79b997595-wgwxf\" (UID: \"74a9daa2-7bfc-487c-9990-9848391da95d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.369585 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/74a9daa2-7bfc-487c-9990-9848391da95d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wgwxf\" (UID: \"74a9daa2-7bfc-487c-9990-9848391da95d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.373645 4691 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/74a9daa2-7bfc-487c-9990-9848391da95d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wgwxf\" (UID: \"74a9daa2-7bfc-487c-9990-9848391da95d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.385587 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/74a9daa2-7bfc-487c-9990-9848391da95d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wgwxf\" (UID: \"74a9daa2-7bfc-487c-9990-9848391da95d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.391953 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56g84\" (UniqueName: \"kubernetes.io/projected/74a9daa2-7bfc-487c-9990-9848391da95d-kube-api-access-56g84\") pod \"marketplace-operator-79b997595-wgwxf\" (UID: \"74a9daa2-7bfc-487c-9990-9848391da95d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:53 crc kubenswrapper[4691]: E1124 08:01:53.394912 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a is running failed: container process not found" containerID="645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a" cmd=["grpc_health_probe","-addr=:50051"] Nov 24 08:01:53 crc kubenswrapper[4691]: E1124 08:01:53.395140 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a is running failed: container process not found" containerID="645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a" cmd=["grpc_health_probe","-addr=:50051"] Nov 24 08:01:53 crc kubenswrapper[4691]: E1124 08:01:53.396657 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a is running failed: container process not found" containerID="645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a" cmd=["grpc_health_probe","-addr=:50051"] Nov 24 08:01:53 crc kubenswrapper[4691]: E1124 08:01:53.396745 4691 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-cbnjg" podUID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerName="registry-server" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.532366 4691 generic.go:334] "Generic (PLEG): container finished" podID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerID="1da44a96efd55d3c0b8a902e184022f8dd8e3d5b611502eb2c86cee8206c93a6" exitCode=0 Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.532727 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2t866" 
event={"ID":"50e404aa-af0b-471a-9289-ab9bb5317ffc","Type":"ContainerDied","Data":"1da44a96efd55d3c0b8a902e184022f8dd8e3d5b611502eb2c86cee8206c93a6"} Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.537085 4691 generic.go:334] "Generic (PLEG): container finished" podID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerID="645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a" exitCode=0 Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.537129 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cbnjg" event={"ID":"5c3c36fe-8ff0-4639-ae9c-e69785ea4611","Type":"ContainerDied","Data":"645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a"} Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.538398 4691 generic.go:334] "Generic (PLEG): container finished" podID="28d1a6e7-60cf-4233-9298-4a561b105271" containerID="3fad668274b2906cb8bc8f84b7d0b0ef3e5fc19797c064fd2e48ef1cdeec7bf4" exitCode=0 Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.538437 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" event={"ID":"28d1a6e7-60cf-4233-9298-4a561b105271","Type":"ContainerDied","Data":"3fad668274b2906cb8bc8f84b7d0b0ef3e5fc19797c064fd2e48ef1cdeec7bf4"} Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.540865 4691 generic.go:334] "Generic (PLEG): container finished" podID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerID="e84332a2ddcf939d9b273ac6567be27e1871d0d05b00708654ad2f8f49cd7ae6" exitCode=0 Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.540904 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pnnmn" event={"ID":"9312bc5d-54a5-4172-9674-1afebef9cc98","Type":"ContainerDied","Data":"e84332a2ddcf939d9b273ac6567be27e1871d0d05b00708654ad2f8f49cd7ae6"} Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.542136 4691 generic.go:334] "Generic (PLEG): container finished" podID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" containerID="ce04c23a0bfb4dd3ca78ba9c16fc588942c0ac0c32675e14b8ac96ad61f32c10" exitCode=0 Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.542158 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5r6nd" event={"ID":"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e","Type":"ContainerDied","Data":"ce04c23a0bfb4dd3ca78ba9c16fc588942c0ac0c32675e14b8ac96ad61f32c10"} Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.542172 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5r6nd" event={"ID":"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e","Type":"ContainerDied","Data":"255789470c056f5e4bc3553e3aef2defcf5c03a8630c60976f6f648ea2e6c055"} Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.542183 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="255789470c056f5e4bc3553e3aef2defcf5c03a8630c60976f6f648ea2e6c055" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.549950 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.621550 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.633638 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.649101 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.655365 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674043 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cpq4c\" (UniqueName: \"kubernetes.io/projected/9312bc5d-54a5-4172-9674-1afebef9cc98-kube-api-access-cpq4c\") pod \"9312bc5d-54a5-4172-9674-1afebef9cc98\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674110 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-operator-metrics\") pod \"28d1a6e7-60cf-4233-9298-4a561b105271\" (UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674153 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-utilities\") pod \"9312bc5d-54a5-4172-9674-1afebef9cc98\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674181 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dlgz\" (UniqueName: \"kubernetes.io/projected/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-kube-api-access-2dlgz\") pod \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674222 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-trusted-ca\") pod \"28d1a6e7-60cf-4233-9298-4a561b105271\" (UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674239 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-catalog-content\") pod \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674276 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-utilities\") pod \"50e404aa-af0b-471a-9289-ab9bb5317ffc\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674291 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgkzd\" (UniqueName: \"kubernetes.io/projected/50e404aa-af0b-471a-9289-ab9bb5317ffc-kube-api-access-zgkzd\") pod \"50e404aa-af0b-471a-9289-ab9bb5317ffc\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674328 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-lbkfn\" (UniqueName: \"kubernetes.io/projected/28d1a6e7-60cf-4233-9298-4a561b105271-kube-api-access-lbkfn\") pod \"28d1a6e7-60cf-4233-9298-4a561b105271\" (UID: \"28d1a6e7-60cf-4233-9298-4a561b105271\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674347 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-catalog-content\") pod \"9312bc5d-54a5-4172-9674-1afebef9cc98\" (UID: \"9312bc5d-54a5-4172-9674-1afebef9cc98\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674389 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-catalog-content\") pod \"50e404aa-af0b-471a-9289-ab9bb5317ffc\" (UID: \"50e404aa-af0b-471a-9289-ab9bb5317ffc\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.674411 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-utilities\") pod \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\" (UID: \"ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.675549 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-utilities" (OuterVolumeSpecName: "utilities") pod "ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" (UID: "ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.684334 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-utilities" (OuterVolumeSpecName: "utilities") pod "50e404aa-af0b-471a-9289-ab9bb5317ffc" (UID: "50e404aa-af0b-471a-9289-ab9bb5317ffc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.686374 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "28d1a6e7-60cf-4233-9298-4a561b105271" (UID: "28d1a6e7-60cf-4233-9298-4a561b105271"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.701611 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-utilities" (OuterVolumeSpecName: "utilities") pod "9312bc5d-54a5-4172-9674-1afebef9cc98" (UID: "9312bc5d-54a5-4172-9674-1afebef9cc98"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.712277 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "50e404aa-af0b-471a-9289-ab9bb5317ffc" (UID: "50e404aa-af0b-471a-9289-ab9bb5317ffc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.742073 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" (UID: "ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.754259 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50e404aa-af0b-471a-9289-ab9bb5317ffc-kube-api-access-zgkzd" (OuterVolumeSpecName: "kube-api-access-zgkzd") pod "50e404aa-af0b-471a-9289-ab9bb5317ffc" (UID: "50e404aa-af0b-471a-9289-ab9bb5317ffc"). InnerVolumeSpecName "kube-api-access-zgkzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.754468 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-kube-api-access-2dlgz" (OuterVolumeSpecName: "kube-api-access-2dlgz") pod "ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" (UID: "ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e"). InnerVolumeSpecName "kube-api-access-2dlgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.755115 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9312bc5d-54a5-4172-9674-1afebef9cc98-kube-api-access-cpq4c" (OuterVolumeSpecName: "kube-api-access-cpq4c") pod "9312bc5d-54a5-4172-9674-1afebef9cc98" (UID: "9312bc5d-54a5-4172-9674-1afebef9cc98"). InnerVolumeSpecName "kube-api-access-cpq4c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.755667 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28d1a6e7-60cf-4233-9298-4a561b105271-kube-api-access-lbkfn" (OuterVolumeSpecName: "kube-api-access-lbkfn") pod "28d1a6e7-60cf-4233-9298-4a561b105271" (UID: "28d1a6e7-60cf-4233-9298-4a561b105271"). InnerVolumeSpecName "kube-api-access-lbkfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.755678 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "28d1a6e7-60cf-4233-9298-4a561b105271" (UID: "28d1a6e7-60cf-4233-9298-4a561b105271"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.775301 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.775334 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.775345 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cpq4c\" (UniqueName: \"kubernetes.io/projected/9312bc5d-54a5-4172-9674-1afebef9cc98-kube-api-access-cpq4c\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.775357 4691 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.775385 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.775397 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dlgz\" (UniqueName: \"kubernetes.io/projected/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-kube-api-access-2dlgz\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.775406 4691 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/28d1a6e7-60cf-4233-9298-4a561b105271-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.775416 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.775423 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50e404aa-af0b-471a-9289-ab9bb5317ffc-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.775431 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgkzd\" (UniqueName: \"kubernetes.io/projected/50e404aa-af0b-471a-9289-ab9bb5317ffc-kube-api-access-zgkzd\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.775440 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lbkfn\" (UniqueName: \"kubernetes.io/projected/28d1a6e7-60cf-4233-9298-4a561b105271-kube-api-access-lbkfn\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.797254 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cbnjg" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.819121 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9312bc5d-54a5-4172-9674-1afebef9cc98" (UID: "9312bc5d-54a5-4172-9674-1afebef9cc98"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.875720 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-utilities\") pod \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.875775 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-catalog-content\") pod \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.875819 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6wk6\" (UniqueName: \"kubernetes.io/projected/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-kube-api-access-d6wk6\") pod \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\" (UID: \"5c3c36fe-8ff0-4639-ae9c-e69785ea4611\") " Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.875961 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9312bc5d-54a5-4172-9674-1afebef9cc98-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.877052 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-utilities" (OuterVolumeSpecName: "utilities") pod "5c3c36fe-8ff0-4639-ae9c-e69785ea4611" (UID: "5c3c36fe-8ff0-4639-ae9c-e69785ea4611"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.879271 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-kube-api-access-d6wk6" (OuterVolumeSpecName: "kube-api-access-d6wk6") pod "5c3c36fe-8ff0-4639-ae9c-e69785ea4611" (UID: "5c3c36fe-8ff0-4639-ae9c-e69785ea4611"). InnerVolumeSpecName "kube-api-access-d6wk6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.965969 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5c3c36fe-8ff0-4639-ae9c-e69785ea4611" (UID: "5c3c36fe-8ff0-4639-ae9c-e69785ea4611"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.977248 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.977279 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:53 crc kubenswrapper[4691]: I1124 08:01:53.977291 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6wk6\" (UniqueName: \"kubernetes.io/projected/5c3c36fe-8ff0-4639-ae9c-e69785ea4611-kube-api-access-d6wk6\") on node \"crc\" DevicePath \"\"" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.098277 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wgwxf"] Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.550965 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" event={"ID":"74a9daa2-7bfc-487c-9990-9848391da95d","Type":"ContainerStarted","Data":"6fc43c4f4f51f288f3e13c7a57afc7555d9f0aac56537915d0d27b3fd0902f17"} Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.551694 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" event={"ID":"74a9daa2-7bfc-487c-9990-9848391da95d","Type":"ContainerStarted","Data":"d6dda348188387824abcab6bf10f9cb373c3dbf5964954140039c92ecc9d897a"} Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.551721 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.554673 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2t866" event={"ID":"50e404aa-af0b-471a-9289-ab9bb5317ffc","Type":"ContainerDied","Data":"573c89398c69dffdbd4752c8a40b50124e833972f1c44d4c46a252484aa75086"} Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.554744 4691 scope.go:117] "RemoveContainer" containerID="1da44a96efd55d3c0b8a902e184022f8dd8e3d5b611502eb2c86cee8206c93a6" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.554918 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2t866" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.558584 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cbnjg" event={"ID":"5c3c36fe-8ff0-4639-ae9c-e69785ea4611","Type":"ContainerDied","Data":"1b2d5cfe20c9fa75f3c8a4884d40174a134947300c4ca07264cc6bfac01711ca"} Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.558617 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cbnjg" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.559913 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" event={"ID":"28d1a6e7-60cf-4233-9298-4a561b105271","Type":"ContainerDied","Data":"73f4c8fee0abea023921be454c9d0e67beff15e409cf6c67272177e02fcddd9c"} Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.560013 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cbkxk" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.564046 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.566199 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5r6nd" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.567579 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pnnmn" event={"ID":"9312bc5d-54a5-4172-9674-1afebef9cc98","Type":"ContainerDied","Data":"f3ee436203739ba08ca4a5f9de7e007253fab5b7337c295a0e003566d95b022d"} Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.567682 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pnnmn" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.570412 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-wgwxf" podStartSLOduration=1.5704009540000001 podStartE2EDuration="1.570400954s" podCreationTimestamp="2025-11-24 08:01:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:01:54.569440075 +0000 UTC m=+276.568389324" watchObservedRunningTime="2025-11-24 08:01:54.570400954 +0000 UTC m=+276.569350203" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.584235 4691 scope.go:117] "RemoveContainer" containerID="9be25836b8018d4c8e0565f009c9e830bfafcc9bdb554921513027a48be8b025" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.642617 4691 scope.go:117] "RemoveContainer" containerID="60ea4e33161425139b91adc2c72aa669d439ff8cb55fa445c03f68b13c5dd1dc" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.649092 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5r6nd"] Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.664681 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5r6nd"] Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.668883 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2t866"] Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.671506 4691 scope.go:117] "RemoveContainer" containerID="645f216e02f7bf0274cab15a8cb0937543d6b08d03cbad35f27a10b46a46c08a" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.671531 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2t866"] Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.686783 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cbnjg"] Nov 24 08:01:54 crc 
kubenswrapper[4691]: I1124 08:01:54.696873 4691 scope.go:117] "RemoveContainer" containerID="ae5bf999410071387a25a9446bbf82bd60c37675b9f947813823e99508c6e406" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.697673 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cbnjg"] Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.703492 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cbkxk"] Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.707905 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cbkxk"] Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.712084 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pnnmn"] Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.718155 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pnnmn"] Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.725479 4691 scope.go:117] "RemoveContainer" containerID="816346317d4822ba600849ea4d8245a561597e2559d11be8ff8f079de7c2d029" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.741383 4691 scope.go:117] "RemoveContainer" containerID="3fad668274b2906cb8bc8f84b7d0b0ef3e5fc19797c064fd2e48ef1cdeec7bf4" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.754659 4691 scope.go:117] "RemoveContainer" containerID="e84332a2ddcf939d9b273ac6567be27e1871d0d05b00708654ad2f8f49cd7ae6" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.776271 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28d1a6e7-60cf-4233-9298-4a561b105271" path="/var/lib/kubelet/pods/28d1a6e7-60cf-4233-9298-4a561b105271/volumes" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.776888 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" path="/var/lib/kubelet/pods/50e404aa-af0b-471a-9289-ab9bb5317ffc/volumes" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.777572 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" path="/var/lib/kubelet/pods/5c3c36fe-8ff0-4639-ae9c-e69785ea4611/volumes" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.779759 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" path="/var/lib/kubelet/pods/9312bc5d-54a5-4172-9674-1afebef9cc98/volumes" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.781280 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" path="/var/lib/kubelet/pods/ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e/volumes" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.790316 4691 scope.go:117] "RemoveContainer" containerID="4b1de7ca78bf651c84161b3469016d9e641d0f852a5f1d1dd9dd3ec802054cff" Nov 24 08:01:54 crc kubenswrapper[4691]: I1124 08:01:54.811876 4691 scope.go:117] "RemoveContainer" containerID="96806321aa83041c079d3bf2e4bb0f84dcabaea8204ecaff5dd74c45e4b0bb98" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.369153 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jq477"] Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.369897 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" 
containerName="extract-content" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.369917 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerName="extract-content" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.369946 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerName="extract-utilities" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.369955 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerName="extract-utilities" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.369964 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" containerName="extract-content" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.369972 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" containerName="extract-content" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.369979 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.369986 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.369997 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370008 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.370022 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370029 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.370039 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerName="extract-content" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370046 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerName="extract-content" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.370056 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" containerName="extract-utilities" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370064 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" containerName="extract-utilities" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.370094 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerName="extract-utilities" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370103 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerName="extract-utilities" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.370112 4691 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerName="extract-utilities" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370120 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerName="extract-utilities" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.370134 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370142 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.370150 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerName="extract-content" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370157 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerName="extract-content" Nov 24 08:01:55 crc kubenswrapper[4691]: E1124 08:01:55.370167 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28d1a6e7-60cf-4233-9298-4a561b105271" containerName="marketplace-operator" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370174 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="28d1a6e7-60cf-4233-9298-4a561b105271" containerName="marketplace-operator" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370276 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c3c36fe-8ff0-4639-ae9c-e69785ea4611" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370289 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccd1dae5-d9ae-4b7d-aa2b-f85ee6127c5e" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370302 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="9312bc5d-54a5-4172-9674-1afebef9cc98" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370319 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="50e404aa-af0b-471a-9289-ab9bb5317ffc" containerName="registry-server" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.370331 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="28d1a6e7-60cf-4233-9298-4a561b105271" containerName="marketplace-operator" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.371178 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.373492 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.423137 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jq477"] Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.501708 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a985ac0-8176-499f-86d2-f58210944072-utilities\") pod \"certified-operators-jq477\" (UID: \"5a985ac0-8176-499f-86d2-f58210944072\") " pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.502008 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a985ac0-8176-499f-86d2-f58210944072-catalog-content\") pod \"certified-operators-jq477\" (UID: \"5a985ac0-8176-499f-86d2-f58210944072\") " pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.502164 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4pv7\" (UniqueName: \"kubernetes.io/projected/5a985ac0-8176-499f-86d2-f58210944072-kube-api-access-r4pv7\") pod \"certified-operators-jq477\" (UID: \"5a985ac0-8176-499f-86d2-f58210944072\") " pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.572940 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ggwnq"] Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.580684 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.580898 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ggwnq"] Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.588971 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.604041 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a985ac0-8176-499f-86d2-f58210944072-catalog-content\") pod \"certified-operators-jq477\" (UID: \"5a985ac0-8176-499f-86d2-f58210944072\") " pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.604114 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4pv7\" (UniqueName: \"kubernetes.io/projected/5a985ac0-8176-499f-86d2-f58210944072-kube-api-access-r4pv7\") pod \"certified-operators-jq477\" (UID: \"5a985ac0-8176-499f-86d2-f58210944072\") " pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.604153 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a985ac0-8176-499f-86d2-f58210944072-utilities\") pod \"certified-operators-jq477\" (UID: \"5a985ac0-8176-499f-86d2-f58210944072\") " pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.604881 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a985ac0-8176-499f-86d2-f58210944072-utilities\") pod \"certified-operators-jq477\" (UID: \"5a985ac0-8176-499f-86d2-f58210944072\") " pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.605390 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a985ac0-8176-499f-86d2-f58210944072-catalog-content\") pod \"certified-operators-jq477\" (UID: \"5a985ac0-8176-499f-86d2-f58210944072\") " pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.631933 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4pv7\" (UniqueName: \"kubernetes.io/projected/5a985ac0-8176-499f-86d2-f58210944072-kube-api-access-r4pv7\") pod \"certified-operators-jq477\" (UID: \"5a985ac0-8176-499f-86d2-f58210944072\") " pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.704247 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.705952 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4c460a6-e0d6-48d6-a225-b4e73926b492-utilities\") pod \"redhat-marketplace-ggwnq\" (UID: \"e4c460a6-e0d6-48d6-a225-b4e73926b492\") " pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.706122 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4c460a6-e0d6-48d6-a225-b4e73926b492-catalog-content\") pod \"redhat-marketplace-ggwnq\" (UID: \"e4c460a6-e0d6-48d6-a225-b4e73926b492\") " pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.706177 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khkdr\" (UniqueName: \"kubernetes.io/projected/e4c460a6-e0d6-48d6-a225-b4e73926b492-kube-api-access-khkdr\") pod \"redhat-marketplace-ggwnq\" (UID: \"e4c460a6-e0d6-48d6-a225-b4e73926b492\") " pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.807699 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4c460a6-e0d6-48d6-a225-b4e73926b492-catalog-content\") pod \"redhat-marketplace-ggwnq\" (UID: \"e4c460a6-e0d6-48d6-a225-b4e73926b492\") " pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.807758 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khkdr\" (UniqueName: \"kubernetes.io/projected/e4c460a6-e0d6-48d6-a225-b4e73926b492-kube-api-access-khkdr\") pod \"redhat-marketplace-ggwnq\" (UID: \"e4c460a6-e0d6-48d6-a225-b4e73926b492\") " pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.807824 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4c460a6-e0d6-48d6-a225-b4e73926b492-utilities\") pod \"redhat-marketplace-ggwnq\" (UID: \"e4c460a6-e0d6-48d6-a225-b4e73926b492\") " pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.817744 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e4c460a6-e0d6-48d6-a225-b4e73926b492-utilities\") pod \"redhat-marketplace-ggwnq\" (UID: \"e4c460a6-e0d6-48d6-a225-b4e73926b492\") " pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.817756 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e4c460a6-e0d6-48d6-a225-b4e73926b492-catalog-content\") pod \"redhat-marketplace-ggwnq\" (UID: \"e4c460a6-e0d6-48d6-a225-b4e73926b492\") " pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.835319 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khkdr\" (UniqueName: \"kubernetes.io/projected/e4c460a6-e0d6-48d6-a225-b4e73926b492-kube-api-access-khkdr\") pod 
\"redhat-marketplace-ggwnq\" (UID: \"e4c460a6-e0d6-48d6-a225-b4e73926b492\") " pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:01:55 crc kubenswrapper[4691]: I1124 08:01:55.898892 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:01:56 crc kubenswrapper[4691]: I1124 08:01:56.083642 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ggwnq"] Nov 24 08:01:56 crc kubenswrapper[4691]: I1124 08:01:56.125045 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jq477"] Nov 24 08:01:56 crc kubenswrapper[4691]: W1124 08:01:56.136256 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5a985ac0_8176_499f_86d2_f58210944072.slice/crio-ea27f1a003749dcca2c4de92fe4d584c6b88dcd2d2df588956fa03fcd2b142aa WatchSource:0}: Error finding container ea27f1a003749dcca2c4de92fe4d584c6b88dcd2d2df588956fa03fcd2b142aa: Status 404 returned error can't find the container with id ea27f1a003749dcca2c4de92fe4d584c6b88dcd2d2df588956fa03fcd2b142aa Nov 24 08:01:56 crc kubenswrapper[4691]: I1124 08:01:56.596540 4691 generic.go:334] "Generic (PLEG): container finished" podID="5a985ac0-8176-499f-86d2-f58210944072" containerID="0e22ab8388c70b2a4a5c04a54447a2b544d717126c74df21d5d9736871ce8e7a" exitCode=0 Nov 24 08:01:56 crc kubenswrapper[4691]: I1124 08:01:56.596618 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jq477" event={"ID":"5a985ac0-8176-499f-86d2-f58210944072","Type":"ContainerDied","Data":"0e22ab8388c70b2a4a5c04a54447a2b544d717126c74df21d5d9736871ce8e7a"} Nov 24 08:01:56 crc kubenswrapper[4691]: I1124 08:01:56.596655 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jq477" event={"ID":"5a985ac0-8176-499f-86d2-f58210944072","Type":"ContainerStarted","Data":"ea27f1a003749dcca2c4de92fe4d584c6b88dcd2d2df588956fa03fcd2b142aa"} Nov 24 08:01:56 crc kubenswrapper[4691]: I1124 08:01:56.599236 4691 generic.go:334] "Generic (PLEG): container finished" podID="e4c460a6-e0d6-48d6-a225-b4e73926b492" containerID="42a73bd811cdecead5ef29cb2d9f85463356affc712d4ae7d83f8e57a65ea9fd" exitCode=0 Nov 24 08:01:56 crc kubenswrapper[4691]: I1124 08:01:56.600908 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ggwnq" event={"ID":"e4c460a6-e0d6-48d6-a225-b4e73926b492","Type":"ContainerDied","Data":"42a73bd811cdecead5ef29cb2d9f85463356affc712d4ae7d83f8e57a65ea9fd"} Nov 24 08:01:56 crc kubenswrapper[4691]: I1124 08:01:56.600934 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ggwnq" event={"ID":"e4c460a6-e0d6-48d6-a225-b4e73926b492","Type":"ContainerStarted","Data":"a1d30c61135ad55ae667600e9ef36d77ccf0fa420ee955e1ac9cb748cd253f8f"} Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.609304 4691 generic.go:334] "Generic (PLEG): container finished" podID="5a985ac0-8176-499f-86d2-f58210944072" containerID="6ecb80bcc430139f859d5d70554e99a58aa59c18539f67e2071eb336b9af3349" exitCode=0 Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.609377 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jq477" 
event={"ID":"5a985ac0-8176-499f-86d2-f58210944072","Type":"ContainerDied","Data":"6ecb80bcc430139f859d5d70554e99a58aa59c18539f67e2071eb336b9af3349"} Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.613436 4691 generic.go:334] "Generic (PLEG): container finished" podID="e4c460a6-e0d6-48d6-a225-b4e73926b492" containerID="43a0a606ed3ae389a2b1b6e2aff0f05e080593fda661d3b9e2d710d9979327f0" exitCode=0 Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.613482 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ggwnq" event={"ID":"e4c460a6-e0d6-48d6-a225-b4e73926b492","Type":"ContainerDied","Data":"43a0a606ed3ae389a2b1b6e2aff0f05e080593fda661d3b9e2d710d9979327f0"} Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.834591 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zkkjt"] Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.836979 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.839741 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zkkjt"] Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.839817 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.939389 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69r5m\" (UniqueName: \"kubernetes.io/projected/25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409-kube-api-access-69r5m\") pod \"community-operators-zkkjt\" (UID: \"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409\") " pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.939483 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409-utilities\") pod \"community-operators-zkkjt\" (UID: \"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409\") " pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.939514 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409-catalog-content\") pod \"community-operators-zkkjt\" (UID: \"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409\") " pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.967626 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gj4c6"] Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.969264 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.972668 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 24 08:01:57 crc kubenswrapper[4691]: I1124 08:01:57.983809 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gj4c6"] Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.041552 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-catalog-content\") pod \"redhat-operators-gj4c6\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.041647 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8t6g\" (UniqueName: \"kubernetes.io/projected/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-kube-api-access-c8t6g\") pod \"redhat-operators-gj4c6\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.041779 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-utilities\") pod \"redhat-operators-gj4c6\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.041891 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69r5m\" (UniqueName: \"kubernetes.io/projected/25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409-kube-api-access-69r5m\") pod \"community-operators-zkkjt\" (UID: \"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409\") " pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.041932 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409-utilities\") pod \"community-operators-zkkjt\" (UID: \"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409\") " pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.041955 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409-catalog-content\") pod \"community-operators-zkkjt\" (UID: \"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409\") " pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.042514 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409-catalog-content\") pod \"community-operators-zkkjt\" (UID: \"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409\") " pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.043207 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409-utilities\") pod \"community-operators-zkkjt\" (UID: 
\"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409\") " pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.064635 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69r5m\" (UniqueName: \"kubernetes.io/projected/25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409-kube-api-access-69r5m\") pod \"community-operators-zkkjt\" (UID: \"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409\") " pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.143021 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-catalog-content\") pod \"redhat-operators-gj4c6\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.143096 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8t6g\" (UniqueName: \"kubernetes.io/projected/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-kube-api-access-c8t6g\") pod \"redhat-operators-gj4c6\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.143123 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-utilities\") pod \"redhat-operators-gj4c6\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.143648 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-utilities\") pod \"redhat-operators-gj4c6\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.144014 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-catalog-content\") pod \"redhat-operators-gj4c6\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.156008 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.160436 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8t6g\" (UniqueName: \"kubernetes.io/projected/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-kube-api-access-c8t6g\") pod \"redhat-operators-gj4c6\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.287955 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.514682 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gj4c6"] Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.587096 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zkkjt"] Nov 24 08:01:58 crc kubenswrapper[4691]: W1124 08:01:58.595507 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25c29cf2_bd0e_42fa_baa8_a8b2b7ee1409.slice/crio-37d84defefe40fca24b0400d073fa7a818389df23ce294511c66d6dcf25d3aff WatchSource:0}: Error finding container 37d84defefe40fca24b0400d073fa7a818389df23ce294511c66d6dcf25d3aff: Status 404 returned error can't find the container with id 37d84defefe40fca24b0400d073fa7a818389df23ce294511c66d6dcf25d3aff Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.624347 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ggwnq" event={"ID":"e4c460a6-e0d6-48d6-a225-b4e73926b492","Type":"ContainerStarted","Data":"6136fbb1a9eb6bbdb831dc139b4bdd0acdc0c01619732e90655e39673a8c8b37"} Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.628725 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkkjt" event={"ID":"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409","Type":"ContainerStarted","Data":"37d84defefe40fca24b0400d073fa7a818389df23ce294511c66d6dcf25d3aff"} Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.630798 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gj4c6" event={"ID":"7b6731c0-8185-4b19-8f1e-c3a6b85b972e","Type":"ContainerStarted","Data":"ad153e7c07a9ecfc800af2bc9fb2804a2759363817cdf6b901533e90163d9de2"} Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.635961 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jq477" event={"ID":"5a985ac0-8176-499f-86d2-f58210944072","Type":"ContainerStarted","Data":"dfa5e2250949e076bdf65cd724701d783ff3f4b12cfdffa06d5cd479d7aa8895"} Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.644602 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ggwnq" podStartSLOduration=2.208113734 podStartE2EDuration="3.644580283s" podCreationTimestamp="2025-11-24 08:01:55 +0000 UTC" firstStartedPulling="2025-11-24 08:01:56.601730307 +0000 UTC m=+278.600679556" lastFinishedPulling="2025-11-24 08:01:58.038196856 +0000 UTC m=+280.037146105" observedRunningTime="2025-11-24 08:01:58.642436808 +0000 UTC m=+280.641386057" watchObservedRunningTime="2025-11-24 08:01:58.644580283 +0000 UTC m=+280.643529532" Nov 24 08:01:58 crc kubenswrapper[4691]: I1124 08:01:58.669932 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jq477" podStartSLOduration=2.220608526 podStartE2EDuration="3.669904476s" podCreationTimestamp="2025-11-24 08:01:55 +0000 UTC" firstStartedPulling="2025-11-24 08:01:56.601069527 +0000 UTC m=+278.600018776" lastFinishedPulling="2025-11-24 08:01:58.050365487 +0000 UTC m=+280.049314726" observedRunningTime="2025-11-24 08:01:58.663642025 +0000 UTC m=+280.662591274" watchObservedRunningTime="2025-11-24 08:01:58.669904476 +0000 UTC m=+280.668853725" Nov 24 08:01:59 crc kubenswrapper[4691]: 
I1124 08:01:59.645312 4691 generic.go:334] "Generic (PLEG): container finished" podID="25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409" containerID="dcc75eff39c4e72b35bc7b930be2c2cf574ab4ebfea6cc607f31e478c20ed684" exitCode=0 Nov 24 08:01:59 crc kubenswrapper[4691]: I1124 08:01:59.645524 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkkjt" event={"ID":"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409","Type":"ContainerDied","Data":"dcc75eff39c4e72b35bc7b930be2c2cf574ab4ebfea6cc607f31e478c20ed684"} Nov 24 08:01:59 crc kubenswrapper[4691]: I1124 08:01:59.647884 4691 generic.go:334] "Generic (PLEG): container finished" podID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerID="411071d5562a930dd40608f82bd31eab6599c9cb4637dde67706c7dd36a03d63" exitCode=0 Nov 24 08:01:59 crc kubenswrapper[4691]: I1124 08:01:59.648549 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gj4c6" event={"ID":"7b6731c0-8185-4b19-8f1e-c3a6b85b972e","Type":"ContainerDied","Data":"411071d5562a930dd40608f82bd31eab6599c9cb4637dde67706c7dd36a03d63"} Nov 24 08:02:00 crc kubenswrapper[4691]: I1124 08:02:00.656919 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkkjt" event={"ID":"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409","Type":"ContainerStarted","Data":"9fd475b800efa2d851a7abea6b14560441ff1f983d2d7a8bfc665868a074cf90"} Nov 24 08:02:00 crc kubenswrapper[4691]: I1124 08:02:00.659783 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gj4c6" event={"ID":"7b6731c0-8185-4b19-8f1e-c3a6b85b972e","Type":"ContainerStarted","Data":"22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094"} Nov 24 08:02:01 crc kubenswrapper[4691]: I1124 08:02:01.683732 4691 generic.go:334] "Generic (PLEG): container finished" podID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerID="22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094" exitCode=0 Nov 24 08:02:01 crc kubenswrapper[4691]: I1124 08:02:01.683824 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gj4c6" event={"ID":"7b6731c0-8185-4b19-8f1e-c3a6b85b972e","Type":"ContainerDied","Data":"22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094"} Nov 24 08:02:01 crc kubenswrapper[4691]: I1124 08:02:01.688523 4691 generic.go:334] "Generic (PLEG): container finished" podID="25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409" containerID="9fd475b800efa2d851a7abea6b14560441ff1f983d2d7a8bfc665868a074cf90" exitCode=0 Nov 24 08:02:01 crc kubenswrapper[4691]: I1124 08:02:01.688580 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkkjt" event={"ID":"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409","Type":"ContainerDied","Data":"9fd475b800efa2d851a7abea6b14560441ff1f983d2d7a8bfc665868a074cf90"} Nov 24 08:02:03 crc kubenswrapper[4691]: I1124 08:02:03.705153 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gj4c6" event={"ID":"7b6731c0-8185-4b19-8f1e-c3a6b85b972e","Type":"ContainerStarted","Data":"2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c"} Nov 24 08:02:03 crc kubenswrapper[4691]: I1124 08:02:03.707887 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkkjt" event={"ID":"25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409","Type":"ContainerStarted","Data":"7078e24e4b9346ec1bd7b1c89ff6e66fa08296c0a87e4018ca71e75093ff2008"} Nov 
24 08:02:03 crc kubenswrapper[4691]: I1124 08:02:03.729166 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gj4c6" podStartSLOduration=4.247261475 podStartE2EDuration="6.729144872s" podCreationTimestamp="2025-11-24 08:01:57 +0000 UTC" firstStartedPulling="2025-11-24 08:01:59.649649992 +0000 UTC m=+281.648599241" lastFinishedPulling="2025-11-24 08:02:02.131533379 +0000 UTC m=+284.130482638" observedRunningTime="2025-11-24 08:02:03.727901464 +0000 UTC m=+285.726850713" watchObservedRunningTime="2025-11-24 08:02:03.729144872 +0000 UTC m=+285.728094121" Nov 24 08:02:03 crc kubenswrapper[4691]: I1124 08:02:03.751920 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zkkjt" podStartSLOduration=4.286143341 podStartE2EDuration="6.751888446s" podCreationTimestamp="2025-11-24 08:01:57 +0000 UTC" firstStartedPulling="2025-11-24 08:01:59.647088284 +0000 UTC m=+281.646037533" lastFinishedPulling="2025-11-24 08:02:02.112833399 +0000 UTC m=+284.111782638" observedRunningTime="2025-11-24 08:02:03.747144951 +0000 UTC m=+285.746094210" watchObservedRunningTime="2025-11-24 08:02:03.751888446 +0000 UTC m=+285.750837695" Nov 24 08:02:05 crc kubenswrapper[4691]: I1124 08:02:05.707623 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:02:05 crc kubenswrapper[4691]: I1124 08:02:05.708113 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:02:05 crc kubenswrapper[4691]: I1124 08:02:05.760198 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:02:05 crc kubenswrapper[4691]: I1124 08:02:05.813096 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jq477" Nov 24 08:02:05 crc kubenswrapper[4691]: I1124 08:02:05.899134 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:02:05 crc kubenswrapper[4691]: I1124 08:02:05.899194 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:02:05 crc kubenswrapper[4691]: I1124 08:02:05.942089 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:02:06 crc kubenswrapper[4691]: I1124 08:02:06.769889 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ggwnq" Nov 24 08:02:08 crc kubenswrapper[4691]: I1124 08:02:08.157094 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:02:08 crc kubenswrapper[4691]: I1124 08:02:08.157763 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:02:08 crc kubenswrapper[4691]: I1124 08:02:08.203254 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:02:08 crc kubenswrapper[4691]: I1124 08:02:08.290139 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:02:08 crc 
kubenswrapper[4691]: I1124 08:02:08.290224 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:02:08 crc kubenswrapper[4691]: I1124 08:02:08.783621 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zkkjt" Nov 24 08:02:09 crc kubenswrapper[4691]: I1124 08:02:09.332633 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gj4c6" podUID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerName="registry-server" probeResult="failure" output=< Nov 24 08:02:09 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 08:02:09 crc kubenswrapper[4691]: > Nov 24 08:02:18 crc kubenswrapper[4691]: I1124 08:02:18.338264 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:02:18 crc kubenswrapper[4691]: I1124 08:02:18.388974 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 08:02:18 crc kubenswrapper[4691]: I1124 08:02:18.522232 4691 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Nov 24 08:02:27 crc kubenswrapper[4691]: I1124 08:02:27.991471 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 24 08:03:21 crc kubenswrapper[4691]: I1124 08:03:21.090329 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:03:21 crc kubenswrapper[4691]: I1124 08:03:21.091293 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:03:51 crc kubenswrapper[4691]: I1124 08:03:51.089351 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:03:51 crc kubenswrapper[4691]: I1124 08:03:51.090127 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:04:21 crc kubenswrapper[4691]: I1124 08:04:21.090189 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:04:21 crc kubenswrapper[4691]: I1124 08:04:21.093142 4691 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:04:21 crc kubenswrapper[4691]: I1124 08:04:21.093491 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:04:21 crc kubenswrapper[4691]: I1124 08:04:21.094913 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2696471643d2e0fe14b54a335aee3091d21a0ad84005def235cb124eca7c95b3"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 08:04:21 crc kubenswrapper[4691]: I1124 08:04:21.095234 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://2696471643d2e0fe14b54a335aee3091d21a0ad84005def235cb124eca7c95b3" gracePeriod=600 Nov 24 08:04:21 crc kubenswrapper[4691]: I1124 08:04:21.568121 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="2696471643d2e0fe14b54a335aee3091d21a0ad84005def235cb124eca7c95b3" exitCode=0 Nov 24 08:04:21 crc kubenswrapper[4691]: I1124 08:04:21.568245 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"2696471643d2e0fe14b54a335aee3091d21a0ad84005def235cb124eca7c95b3"} Nov 24 08:04:21 crc kubenswrapper[4691]: I1124 08:04:21.568654 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"2f66d4866e1d53d3e1351796c2aebb19bcfe0badbd5b9c37eb4d97650922dfd8"} Nov 24 08:04:21 crc kubenswrapper[4691]: I1124 08:04:21.568684 4691 scope.go:117] "RemoveContainer" containerID="bbab02677147f198836fd4ddde22edf489b41ab139d1f179b1e6b15afacb65fb" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.038705 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kldbm"] Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.040260 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.063399 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kldbm"] Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.174265 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.174337 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.174380 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mn6tv\" (UniqueName: \"kubernetes.io/projected/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-kube-api-access-mn6tv\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.174413 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-bound-sa-token\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.174477 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.174553 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-trusted-ca\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.174614 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-registry-certificates\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.174658 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-registry-tls\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.200056 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.276369 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.276441 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.276488 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mn6tv\" (UniqueName: \"kubernetes.io/projected/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-kube-api-access-mn6tv\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.276523 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-bound-sa-token\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.276559 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-trusted-ca\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.276615 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-registry-certificates\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.276647 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-registry-tls\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.277651 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.278440 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-trusted-ca\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.278624 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-registry-certificates\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.293400 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.293427 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-registry-tls\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.302821 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-bound-sa-token\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.303409 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mn6tv\" (UniqueName: \"kubernetes.io/projected/35fd3b63-90f7-4def-bfda-5f2bcf54ba57-kube-api-access-mn6tv\") pod \"image-registry-66df7c8f76-kldbm\" (UID: \"35fd3b63-90f7-4def-bfda-5f2bcf54ba57\") " pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.360349 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.579738 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kldbm"] Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.959398 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" event={"ID":"35fd3b63-90f7-4def-bfda-5f2bcf54ba57","Type":"ContainerStarted","Data":"100883d61c2c16f4a3eb96bcc849bd5d1bc9f6f5fa13390965eba76036d5c0a2"} Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.959471 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" event={"ID":"35fd3b63-90f7-4def-bfda-5f2bcf54ba57","Type":"ContainerStarted","Data":"5e4e554ebb04dff7a39900493b96aa2e6f849963969aa55befdcfdd98a4da2d3"} Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.959569 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:19 crc kubenswrapper[4691]: I1124 08:05:19.985085 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" podStartSLOduration=0.985062765 podStartE2EDuration="985.062765ms" podCreationTimestamp="2025-11-24 08:05:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:05:19.983503539 +0000 UTC m=+481.982452828" watchObservedRunningTime="2025-11-24 08:05:19.985062765 +0000 UTC m=+481.984012014" Nov 24 08:05:39 crc kubenswrapper[4691]: I1124 08:05:39.369414 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-kldbm" Nov 24 08:05:39 crc kubenswrapper[4691]: I1124 08:05:39.431155 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dbzsg"] Nov 24 08:06:04 crc kubenswrapper[4691]: I1124 08:06:04.487108 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" podUID="cde6026c-736d-47f2-ab64-deb47de62820" containerName="registry" containerID="cri-o://c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5" gracePeriod=30 Nov 24 08:06:04 crc kubenswrapper[4691]: I1124 08:06:04.873600 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.041613 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-trusted-ca\") pod \"cde6026c-736d-47f2-ab64-deb47de62820\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.041754 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcvjk\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-kube-api-access-dcvjk\") pod \"cde6026c-736d-47f2-ab64-deb47de62820\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.042657 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-registry-certificates\") pod \"cde6026c-736d-47f2-ab64-deb47de62820\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.042808 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-registry-tls\") pod \"cde6026c-736d-47f2-ab64-deb47de62820\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.042813 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "cde6026c-736d-47f2-ab64-deb47de62820" (UID: "cde6026c-736d-47f2-ab64-deb47de62820"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.043063 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"cde6026c-736d-47f2-ab64-deb47de62820\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.043132 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cde6026c-736d-47f2-ab64-deb47de62820-ca-trust-extracted\") pod \"cde6026c-736d-47f2-ab64-deb47de62820\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.043183 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cde6026c-736d-47f2-ab64-deb47de62820-installation-pull-secrets\") pod \"cde6026c-736d-47f2-ab64-deb47de62820\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.043222 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-bound-sa-token\") pod \"cde6026c-736d-47f2-ab64-deb47de62820\" (UID: \"cde6026c-736d-47f2-ab64-deb47de62820\") " Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.043743 4691 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.044193 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "cde6026c-736d-47f2-ab64-deb47de62820" (UID: "cde6026c-736d-47f2-ab64-deb47de62820"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.050582 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "cde6026c-736d-47f2-ab64-deb47de62820" (UID: "cde6026c-736d-47f2-ab64-deb47de62820"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.050882 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cde6026c-736d-47f2-ab64-deb47de62820-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "cde6026c-736d-47f2-ab64-deb47de62820" (UID: "cde6026c-736d-47f2-ab64-deb47de62820"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.051288 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "cde6026c-736d-47f2-ab64-deb47de62820" (UID: "cde6026c-736d-47f2-ab64-deb47de62820"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.054736 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "cde6026c-736d-47f2-ab64-deb47de62820" (UID: "cde6026c-736d-47f2-ab64-deb47de62820"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.055702 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-kube-api-access-dcvjk" (OuterVolumeSpecName: "kube-api-access-dcvjk") pod "cde6026c-736d-47f2-ab64-deb47de62820" (UID: "cde6026c-736d-47f2-ab64-deb47de62820"). InnerVolumeSpecName "kube-api-access-dcvjk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.062273 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cde6026c-736d-47f2-ab64-deb47de62820-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "cde6026c-736d-47f2-ab64-deb47de62820" (UID: "cde6026c-736d-47f2-ab64-deb47de62820"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.145230 4691 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cde6026c-736d-47f2-ab64-deb47de62820-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.145282 4691 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cde6026c-736d-47f2-ab64-deb47de62820-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.145296 4691 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.145307 4691 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cde6026c-736d-47f2-ab64-deb47de62820-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.145317 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcvjk\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-kube-api-access-dcvjk\") on node \"crc\" DevicePath \"\"" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.145327 4691 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cde6026c-736d-47f2-ab64-deb47de62820-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.327197 4691 generic.go:334] "Generic (PLEG): container finished" podID="cde6026c-736d-47f2-ab64-deb47de62820" containerID="c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5" exitCode=0 Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.327306 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.327319 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" event={"ID":"cde6026c-736d-47f2-ab64-deb47de62820","Type":"ContainerDied","Data":"c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5"} Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.327768 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-dbzsg" event={"ID":"cde6026c-736d-47f2-ab64-deb47de62820","Type":"ContainerDied","Data":"9bb5b1286f39b355e49d0feff3367ab614c50aa2c0edd3331a57665150248e99"} Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.327796 4691 scope.go:117] "RemoveContainer" containerID="c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.355135 4691 scope.go:117] "RemoveContainer" containerID="c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5" Nov 24 08:06:05 crc kubenswrapper[4691]: E1124 08:06:05.355648 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5\": container with ID starting with c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5 not found: ID does not exist" containerID="c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.355685 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5"} err="failed to get container status \"c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5\": rpc error: code = NotFound desc = could not find container \"c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5\": container with ID starting with c459fced275640d0b1beaf832f9fb42975bb17019431036a88069fb04e87e5a5 not found: ID does not exist" Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.372357 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dbzsg"] Nov 24 08:06:05 crc kubenswrapper[4691]: I1124 08:06:05.375912 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-dbzsg"] Nov 24 08:06:06 crc kubenswrapper[4691]: I1124 08:06:06.773313 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cde6026c-736d-47f2-ab64-deb47de62820" path="/var/lib/kubelet/pods/cde6026c-736d-47f2-ab64-deb47de62820/volumes" Nov 24 08:06:18 crc kubenswrapper[4691]: I1124 08:06:18.953641 4691 scope.go:117] "RemoveContainer" containerID="fb983d56ca99fd108b0e79acfee1af28612ccc9408fc295f4f212a09f4cfcae6" Nov 24 08:06:21 crc kubenswrapper[4691]: I1124 08:06:21.090345 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:06:21 crc kubenswrapper[4691]: I1124 08:06:21.090927 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:06:51 crc kubenswrapper[4691]: I1124 08:06:51.090305 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:06:51 crc kubenswrapper[4691]: I1124 08:06:51.091171 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.475626 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-5rltx"] Nov 24 08:06:57 crc kubenswrapper[4691]: E1124 08:06:57.476899 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cde6026c-736d-47f2-ab64-deb47de62820" containerName="registry" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.476923 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="cde6026c-736d-47f2-ab64-deb47de62820" containerName="registry" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.477083 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="cde6026c-736d-47f2-ab64-deb47de62820" containerName="registry" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.477759 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-5rltx" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.479631 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-58b88"] Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.480688 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-58b88" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.481718 4691 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-bvvgg" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.482349 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.490004 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.490201 4691 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-fj5b4" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.496190 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-5rltx"] Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.508128 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-lmkrt"] Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.509121 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-lmkrt" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.510774 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-58b88"] Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.511659 4691 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-87s7t" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.534828 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-lmkrt"] Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.623479 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t7kk\" (UniqueName: \"kubernetes.io/projected/40cf1922-077a-482f-9ffa-7dd636da29ef-kube-api-access-7t7kk\") pod \"cert-manager-webhook-5655c58dd6-lmkrt\" (UID: \"40cf1922-077a-482f-9ffa-7dd636da29ef\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-lmkrt" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.623543 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzpnc\" (UniqueName: \"kubernetes.io/projected/78886f3b-0708-4e26-bc7d-ade51d1b3e9c-kube-api-access-fzpnc\") pod \"cert-manager-cainjector-7f985d654d-5rltx\" (UID: \"78886f3b-0708-4e26-bc7d-ade51d1b3e9c\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-5rltx" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.623568 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p66qj\" (UniqueName: \"kubernetes.io/projected/345048fa-fc45-40c3-bd90-e517c3594a2a-kube-api-access-p66qj\") pod \"cert-manager-5b446d88c5-58b88\" (UID: \"345048fa-fc45-40c3-bd90-e517c3594a2a\") " pod="cert-manager/cert-manager-5b446d88c5-58b88" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.725105 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t7kk\" (UniqueName: \"kubernetes.io/projected/40cf1922-077a-482f-9ffa-7dd636da29ef-kube-api-access-7t7kk\") pod \"cert-manager-webhook-5655c58dd6-lmkrt\" (UID: \"40cf1922-077a-482f-9ffa-7dd636da29ef\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-lmkrt" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.726159 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzpnc\" (UniqueName: \"kubernetes.io/projected/78886f3b-0708-4e26-bc7d-ade51d1b3e9c-kube-api-access-fzpnc\") pod \"cert-manager-cainjector-7f985d654d-5rltx\" (UID: \"78886f3b-0708-4e26-bc7d-ade51d1b3e9c\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-5rltx" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.726611 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p66qj\" (UniqueName: \"kubernetes.io/projected/345048fa-fc45-40c3-bd90-e517c3594a2a-kube-api-access-p66qj\") pod \"cert-manager-5b446d88c5-58b88\" (UID: \"345048fa-fc45-40c3-bd90-e517c3594a2a\") " pod="cert-manager/cert-manager-5b446d88c5-58b88" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.748889 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzpnc\" (UniqueName: \"kubernetes.io/projected/78886f3b-0708-4e26-bc7d-ade51d1b3e9c-kube-api-access-fzpnc\") pod \"cert-manager-cainjector-7f985d654d-5rltx\" (UID: \"78886f3b-0708-4e26-bc7d-ade51d1b3e9c\") " 
pod="cert-manager/cert-manager-cainjector-7f985d654d-5rltx" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.748916 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t7kk\" (UniqueName: \"kubernetes.io/projected/40cf1922-077a-482f-9ffa-7dd636da29ef-kube-api-access-7t7kk\") pod \"cert-manager-webhook-5655c58dd6-lmkrt\" (UID: \"40cf1922-077a-482f-9ffa-7dd636da29ef\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-lmkrt" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.749879 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p66qj\" (UniqueName: \"kubernetes.io/projected/345048fa-fc45-40c3-bd90-e517c3594a2a-kube-api-access-p66qj\") pod \"cert-manager-5b446d88c5-58b88\" (UID: \"345048fa-fc45-40c3-bd90-e517c3594a2a\") " pod="cert-manager/cert-manager-5b446d88c5-58b88" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.806630 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-5rltx" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.817659 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-58b88" Nov 24 08:06:57 crc kubenswrapper[4691]: I1124 08:06:57.826577 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-lmkrt" Nov 24 08:06:58 crc kubenswrapper[4691]: I1124 08:06:58.144851 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-lmkrt"] Nov 24 08:06:58 crc kubenswrapper[4691]: I1124 08:06:58.160892 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 08:06:58 crc kubenswrapper[4691]: I1124 08:06:58.287370 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-5rltx"] Nov 24 08:06:58 crc kubenswrapper[4691]: I1124 08:06:58.291650 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-58b88"] Nov 24 08:06:58 crc kubenswrapper[4691]: W1124 08:06:58.295971 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod345048fa_fc45_40c3_bd90_e517c3594a2a.slice/crio-8d7fe774f94d502de4b01b354f081d3116c63c3fd2370d580d2c392ea88fb736 WatchSource:0}: Error finding container 8d7fe774f94d502de4b01b354f081d3116c63c3fd2370d580d2c392ea88fb736: Status 404 returned error can't find the container with id 8d7fe774f94d502de4b01b354f081d3116c63c3fd2370d580d2c392ea88fb736 Nov 24 08:06:58 crc kubenswrapper[4691]: I1124 08:06:58.709239 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-lmkrt" event={"ID":"40cf1922-077a-482f-9ffa-7dd636da29ef","Type":"ContainerStarted","Data":"918c74a3be904ea56e9a0ca2f3476dd4c3bf7b6e577e6cd40aa6b4f263bb96a1"} Nov 24 08:06:58 crc kubenswrapper[4691]: I1124 08:06:58.710868 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-58b88" event={"ID":"345048fa-fc45-40c3-bd90-e517c3594a2a","Type":"ContainerStarted","Data":"8d7fe774f94d502de4b01b354f081d3116c63c3fd2370d580d2c392ea88fb736"} Nov 24 08:06:58 crc kubenswrapper[4691]: I1124 08:06:58.712501 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-5rltx" 
event={"ID":"78886f3b-0708-4e26-bc7d-ade51d1b3e9c","Type":"ContainerStarted","Data":"9f87a78aa123acc9507cb7cd09c1e40075f44677282dce492f52a451786d5baf"} Nov 24 08:07:01 crc kubenswrapper[4691]: I1124 08:07:01.732492 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-58b88" event={"ID":"345048fa-fc45-40c3-bd90-e517c3594a2a","Type":"ContainerStarted","Data":"7541e5f2b62ff59ecd329bd4ac41ff69db846df7d8878ccc19c4023e1c9ff6e6"} Nov 24 08:07:01 crc kubenswrapper[4691]: I1124 08:07:01.734830 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-5rltx" event={"ID":"78886f3b-0708-4e26-bc7d-ade51d1b3e9c","Type":"ContainerStarted","Data":"9264d79150ee54cce7f4567fcd2367e11406e4711282d541683311e7f8c52f96"} Nov 24 08:07:01 crc kubenswrapper[4691]: I1124 08:07:01.736164 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-lmkrt" event={"ID":"40cf1922-077a-482f-9ffa-7dd636da29ef","Type":"ContainerStarted","Data":"41981f2426d0169015d466efb876547a441a78389ee623dbbc52f7805bf0b848"} Nov 24 08:07:01 crc kubenswrapper[4691]: I1124 08:07:01.736345 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-lmkrt" Nov 24 08:07:01 crc kubenswrapper[4691]: I1124 08:07:01.753177 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-58b88" podStartSLOduration=1.705390171 podStartE2EDuration="4.753148246s" podCreationTimestamp="2025-11-24 08:06:57 +0000 UTC" firstStartedPulling="2025-11-24 08:06:58.298523814 +0000 UTC m=+580.297473063" lastFinishedPulling="2025-11-24 08:07:01.346281899 +0000 UTC m=+583.345231138" observedRunningTime="2025-11-24 08:07:01.751662603 +0000 UTC m=+583.750611852" watchObservedRunningTime="2025-11-24 08:07:01.753148246 +0000 UTC m=+583.752097495" Nov 24 08:07:01 crc kubenswrapper[4691]: I1124 08:07:01.776624 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-lmkrt" podStartSLOduration=1.659976372 podStartE2EDuration="4.776603287s" podCreationTimestamp="2025-11-24 08:06:57 +0000 UTC" firstStartedPulling="2025-11-24 08:06:58.160608499 +0000 UTC m=+580.159557748" lastFinishedPulling="2025-11-24 08:07:01.277235424 +0000 UTC m=+583.276184663" observedRunningTime="2025-11-24 08:07:01.774482095 +0000 UTC m=+583.773431364" watchObservedRunningTime="2025-11-24 08:07:01.776603287 +0000 UTC m=+583.775552536" Nov 24 08:07:07 crc kubenswrapper[4691]: I1124 08:07:07.831171 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-lmkrt" Nov 24 08:07:07 crc kubenswrapper[4691]: I1124 08:07:07.854701 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-5rltx" podStartSLOduration=7.892518851 podStartE2EDuration="10.85467902s" podCreationTimestamp="2025-11-24 08:06:57 +0000 UTC" firstStartedPulling="2025-11-24 08:06:58.297204326 +0000 UTC m=+580.296153575" lastFinishedPulling="2025-11-24 08:07:01.259364485 +0000 UTC m=+583.258313744" observedRunningTime="2025-11-24 08:07:01.790364907 +0000 UTC m=+583.789314156" watchObservedRunningTime="2025-11-24 08:07:07.85467902 +0000 UTC m=+589.853628269" Nov 24 08:07:07 crc kubenswrapper[4691]: I1124 08:07:07.984117 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-ovn-kubernetes/ovnkube-node-6f24c"] Nov 24 08:07:07 crc kubenswrapper[4691]: I1124 08:07:07.984563 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovn-controller" containerID="cri-o://59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693" gracePeriod=30 Nov 24 08:07:07 crc kubenswrapper[4691]: I1124 08:07:07.984969 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="sbdb" containerID="cri-o://184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907" gracePeriod=30 Nov 24 08:07:07 crc kubenswrapper[4691]: I1124 08:07:07.985013 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="nbdb" containerID="cri-o://d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883" gracePeriod=30 Nov 24 08:07:07 crc kubenswrapper[4691]: I1124 08:07:07.985041 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="northd" containerID="cri-o://3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539" gracePeriod=30 Nov 24 08:07:07 crc kubenswrapper[4691]: I1124 08:07:07.985075 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669" gracePeriod=30 Nov 24 08:07:07 crc kubenswrapper[4691]: I1124 08:07:07.985109 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="kube-rbac-proxy-node" containerID="cri-o://67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf" gracePeriod=30 Nov 24 08:07:07 crc kubenswrapper[4691]: I1124 08:07:07.985142 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovn-acl-logging" containerID="cri-o://7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1" gracePeriod=30 Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.047535 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" containerID="cri-o://cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420" gracePeriod=30 Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.345242 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/3.log" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.347570 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovn-acl-logging/0.log" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.347966 4691 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovn-controller/0.log" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.348472 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414054 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-h7pwp"] Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414264 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="sbdb" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414275 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="sbdb" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414284 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414290 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414301 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414306 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414314 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="kubecfg-setup" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414320 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="kubecfg-setup" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414327 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="kube-rbac-proxy-ovn-metrics" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414334 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="kube-rbac-proxy-ovn-metrics" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414342 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414348 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414357 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovn-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414362 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovn-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414370 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovn-acl-logging" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414375 
4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovn-acl-logging" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414384 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="northd" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414389 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="northd" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414400 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414406 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414415 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="nbdb" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414420 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="nbdb" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414432 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="kube-rbac-proxy-node" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414438 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="kube-rbac-proxy-node" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414543 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="kube-rbac-proxy-ovn-metrics" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414553 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="northd" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414560 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="sbdb" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414567 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414573 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414581 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414588 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="kube-rbac-proxy-node" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414595 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414601 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovn-acl-logging" Nov 24 08:07:08 crc 
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414613 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller"
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414621 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovn-controller"
Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.414709 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller"
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.414716 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerName="ovnkube-controller"
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.416404 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp"
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.491364 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-systemd-units\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.491883 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-netns\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.491944 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djg58\" (UniqueName: \"kubernetes.io/projected/106a6e78-a004-4232-a0a2-efecf2f7c248-kube-api-access-djg58\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492009 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-etc-openvswitch\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492038 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-config\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492072 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-systemd\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492102 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-var-lib-openvswitch\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.491552 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492106 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492137 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-netd\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492200 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492256 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-ovn\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492290 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-env-overrides\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492315 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-bin\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492379 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/106a6e78-a004-4232-a0a2-efecf2f7c248-ovn-node-metrics-cert\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492393 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-slash\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492432 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-openvswitch\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492475 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-var-lib-cni-networks-ovn-kubernetes\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492501 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-kubelet\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492526 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-ovn-kubernetes\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492542 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-node-log\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492558 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-script-lib\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492570 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-log-socket\") pod \"106a6e78-a004-4232-a0a2-efecf2f7c248\" (UID: \"106a6e78-a004-4232-a0a2-efecf2f7c248\") "
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492761 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp"
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492788 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-log-socket\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp"
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492813 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-run-netns\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp"
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492827 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-run-ovn\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp"
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492853 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-run-ovn-kubernetes\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp"
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492871 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/59d910cd-27f4-4695-ae89-c36b07cafbf3-ovnkube-config\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp"
Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492887 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-etc-openvswitch\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp"
\"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492920 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-slash\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492936 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-run-openvswitch\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.492986 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzfdr\" (UniqueName: \"kubernetes.io/projected/59d910cd-27f4-4695-ae89-c36b07cafbf3-kube-api-access-wzfdr\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493012 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-node-log\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493030 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-cni-bin\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493055 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-run-systemd\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493074 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-var-lib-openvswitch\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493100 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/59d910cd-27f4-4695-ae89-c36b07cafbf3-ovn-node-metrics-cert\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493126 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-systemd-units\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493157 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-cni-netd\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493196 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-kubelet\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493226 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/59d910cd-27f4-4695-ae89-c36b07cafbf3-env-overrides\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493248 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/59d910cd-27f4-4695-ae89-c36b07cafbf3-ovnkube-script-lib\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493312 4691 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493325 4691 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493337 4691 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493390 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493718 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493777 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493816 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493840 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493850 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-node-log" (OuterVolumeSpecName: "node-log") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493942 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.493999 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.494061 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.494088 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-slash" (OuterVolumeSpecName: "host-slash") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.494117 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-log-socket" (OuterVolumeSpecName: "log-socket") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.494131 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.494150 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.494168 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.498925 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/106a6e78-a004-4232-a0a2-efecf2f7c248-kube-api-access-djg58" (OuterVolumeSpecName: "kube-api-access-djg58") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "kube-api-access-djg58". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.499100 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/106a6e78-a004-4232-a0a2-efecf2f7c248-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.507583 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "106a6e78-a004-4232-a0a2-efecf2f7c248" (UID: "106a6e78-a004-4232-a0a2-efecf2f7c248"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.594920 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzfdr\" (UniqueName: \"kubernetes.io/projected/59d910cd-27f4-4695-ae89-c36b07cafbf3-kube-api-access-wzfdr\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595019 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-node-log\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595048 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-cni-bin\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595074 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-run-systemd\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595097 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-var-lib-openvswitch\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595121 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/59d910cd-27f4-4695-ae89-c36b07cafbf3-ovn-node-metrics-cert\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595144 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-systemd-units\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595171 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-cni-netd\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595211 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-cni-bin\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: 
I1124 08:07:08.595248 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-run-systemd\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595306 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-cni-netd\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595261 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-var-lib-openvswitch\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595332 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-systemd-units\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595407 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-kubelet\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595517 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/59d910cd-27f4-4695-ae89-c36b07cafbf3-ovnkube-script-lib\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595547 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/59d910cd-27f4-4695-ae89-c36b07cafbf3-env-overrides\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595616 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595543 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-kubelet\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595663 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-log-socket\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595644 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-log-socket\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595689 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595749 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-run-netns\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595782 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-run-ovn\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595824 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-run-ovn-kubernetes\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595852 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/59d910cd-27f4-4695-ae89-c36b07cafbf3-ovnkube-config\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595857 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-run-netns\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595874 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-etc-openvswitch\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595918 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-run-ovn-kubernetes\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595947 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-run-ovn\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595967 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-slash\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.595992 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-run-openvswitch\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596118 4691 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596132 4691 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596145 4691 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596157 4691 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-slash\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596170 4691 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/106a6e78-a004-4232-a0a2-efecf2f7c248-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596182 4691 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596194 4691 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596206 4691 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc 
kubenswrapper[4691]: I1124 08:07:08.596219 4691 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596231 4691 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-node-log\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596244 4691 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-log-socket\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596255 4691 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596266 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djg58\" (UniqueName: \"kubernetes.io/projected/106a6e78-a004-4232-a0a2-efecf2f7c248-kube-api-access-djg58\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596277 4691 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596288 4691 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/106a6e78-a004-4232-a0a2-efecf2f7c248-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596299 4691 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596310 4691 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/106a6e78-a004-4232-a0a2-efecf2f7c248-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596345 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-run-openvswitch\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596366 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/59d910cd-27f4-4695-ae89-c36b07cafbf3-env-overrides\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596376 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-etc-openvswitch\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 
08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596411 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-host-slash\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596464 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/59d910cd-27f4-4695-ae89-c36b07cafbf3-ovnkube-script-lib\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.596783 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/59d910cd-27f4-4695-ae89-c36b07cafbf3-ovnkube-config\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.597184 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/59d910cd-27f4-4695-ae89-c36b07cafbf3-node-log\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.600515 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/59d910cd-27f4-4695-ae89-c36b07cafbf3-ovn-node-metrics-cert\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.621146 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzfdr\" (UniqueName: \"kubernetes.io/projected/59d910cd-27f4-4695-ae89-c36b07cafbf3-kube-api-access-wzfdr\") pod \"ovnkube-node-h7pwp\" (UID: \"59d910cd-27f4-4695-ae89-c36b07cafbf3\") " pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.732817 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.791605 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovnkube-controller/3.log" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.795540 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovn-acl-logging/0.log" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.796479 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6f24c_106a6e78-a004-4232-a0a2-efecf2f7c248/ovn-controller/0.log" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797021 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420" exitCode=0 Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797078 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907" exitCode=0 Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797101 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883" exitCode=0 Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797126 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539" exitCode=0 Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797151 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669" exitCode=0 Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797179 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf" exitCode=0 Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797200 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1" exitCode=143 Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797223 4691 generic.go:334] "Generic (PLEG): container finished" podID="106a6e78-a004-4232-a0a2-efecf2f7c248" containerID="59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693" exitCode=143 Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797319 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797371 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797400 4691 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797427 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797518 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797549 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797574 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797600 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797616 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797632 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797647 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797662 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797676 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797691 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797706 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797725 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797750 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797766 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797781 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797795 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797811 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797825 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797841 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797856 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797872 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797887 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797908 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797930 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797947 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 
08:07:08.797961 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797977 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.797991 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798007 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798023 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798038 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798053 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798066 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798087 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" event={"ID":"106a6e78-a004-4232-a0a2-efecf2f7c248","Type":"ContainerDied","Data":"6ebc38bdcbb47f8898ca2377a606c51091bc64c3f9eaeecad1b38e1072edde49"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798113 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798129 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798146 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798162 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798176 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 
08:07:08.798192 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798208 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798223 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798237 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798253 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798285 4691 scope.go:117] "RemoveContainer" containerID="cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.798414 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6f24c" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.801873 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gxxrf_b2332a73-f85c-470c-9209-c5e5cd1bc3a1/kube-multus/2.log" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.802779 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gxxrf_b2332a73-f85c-470c-9209-c5e5cd1bc3a1/kube-multus/1.log" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.802823 4691 generic.go:334] "Generic (PLEG): container finished" podID="b2332a73-f85c-470c-9209-c5e5cd1bc3a1" containerID="b88a5444a724c9be6f939634f2ae4dedc6fb1554307eb43642e1e6350e8cc201" exitCode=2 Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.802917 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gxxrf" event={"ID":"b2332a73-f85c-470c-9209-c5e5cd1bc3a1","Type":"ContainerDied","Data":"b88a5444a724c9be6f939634f2ae4dedc6fb1554307eb43642e1e6350e8cc201"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.802956 4691 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.803393 4691 scope.go:117] "RemoveContainer" containerID="b88a5444a724c9be6f939634f2ae4dedc6fb1554307eb43642e1e6350e8cc201" Nov 24 08:07:08 crc kubenswrapper[4691]: E1124 08:07:08.804085 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-gxxrf_openshift-multus(b2332a73-f85c-470c-9209-c5e5cd1bc3a1)\"" pod="openshift-multus/multus-gxxrf" podUID="b2332a73-f85c-470c-9209-c5e5cd1bc3a1" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.805428 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" event={"ID":"59d910cd-27f4-4695-ae89-c36b07cafbf3","Type":"ContainerStarted","Data":"546490f48daa4d49f06cf606ee9d456734f6f9ceaa9aa77364804461084b9a42"} Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.824668 4691 scope.go:117] "RemoveContainer" containerID="62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.865807 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-6f24c"] Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.871903 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-6f24c"] Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.873239 4691 scope.go:117] "RemoveContainer" containerID="184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.889769 4691 scope.go:117] "RemoveContainer" containerID="d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.904663 4691 scope.go:117] "RemoveContainer" containerID="3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.918849 4691 scope.go:117] "RemoveContainer" containerID="7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669" Nov 24 08:07:08 crc kubenswrapper[4691]: I1124 08:07:08.933612 4691 scope.go:117] "RemoveContainer" containerID="67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.001803 4691 scope.go:117] "RemoveContainer" containerID="7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.015595 4691 scope.go:117] "RemoveContainer" containerID="59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.027437 4691 scope.go:117] "RemoveContainer" containerID="00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.045026 4691 scope.go:117] "RemoveContainer" containerID="cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420" Nov 24 08:07:09 crc kubenswrapper[4691]: E1124 08:07:09.045541 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420\": container with ID starting with cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420 not found: ID does not exist" containerID="cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.045576 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420"} err="failed to get container status \"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420\": rpc error: code = NotFound desc = could not find container \"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420\": container with ID starting with cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.045605 4691 scope.go:117] "RemoveContainer" 
containerID="62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805" Nov 24 08:07:09 crc kubenswrapper[4691]: E1124 08:07:09.046104 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805\": container with ID starting with 62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805 not found: ID does not exist" containerID="62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.046131 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805"} err="failed to get container status \"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805\": rpc error: code = NotFound desc = could not find container \"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805\": container with ID starting with 62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.046173 4691 scope.go:117] "RemoveContainer" containerID="184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907" Nov 24 08:07:09 crc kubenswrapper[4691]: E1124 08:07:09.046667 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\": container with ID starting with 184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907 not found: ID does not exist" containerID="184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.046755 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907"} err="failed to get container status \"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\": rpc error: code = NotFound desc = could not find container \"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\": container with ID starting with 184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.046821 4691 scope.go:117] "RemoveContainer" containerID="d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883" Nov 24 08:07:09 crc kubenswrapper[4691]: E1124 08:07:09.047284 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\": container with ID starting with d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883 not found: ID does not exist" containerID="d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.047311 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883"} err="failed to get container status \"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\": rpc error: code = NotFound desc = could not find container \"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\": container with ID starting with 
d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.047331 4691 scope.go:117] "RemoveContainer" containerID="3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539" Nov 24 08:07:09 crc kubenswrapper[4691]: E1124 08:07:09.047647 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\": container with ID starting with 3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539 not found: ID does not exist" containerID="3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.047676 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539"} err="failed to get container status \"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\": rpc error: code = NotFound desc = could not find container \"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\": container with ID starting with 3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.047690 4691 scope.go:117] "RemoveContainer" containerID="7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669" Nov 24 08:07:09 crc kubenswrapper[4691]: E1124 08:07:09.048234 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\": container with ID starting with 7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669 not found: ID does not exist" containerID="7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.048256 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669"} err="failed to get container status \"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\": rpc error: code = NotFound desc = could not find container \"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\": container with ID starting with 7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.048271 4691 scope.go:117] "RemoveContainer" containerID="67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf" Nov 24 08:07:09 crc kubenswrapper[4691]: E1124 08:07:09.048778 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\": container with ID starting with 67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf not found: ID does not exist" containerID="67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.048799 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf"} err="failed to get container status \"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\": rpc 
error: code = NotFound desc = could not find container \"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\": container with ID starting with 67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.048812 4691 scope.go:117] "RemoveContainer" containerID="7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1" Nov 24 08:07:09 crc kubenswrapper[4691]: E1124 08:07:09.049314 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\": container with ID starting with 7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1 not found: ID does not exist" containerID="7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.049468 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1"} err="failed to get container status \"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\": rpc error: code = NotFound desc = could not find container \"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\": container with ID starting with 7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.049744 4691 scope.go:117] "RemoveContainer" containerID="59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693" Nov 24 08:07:09 crc kubenswrapper[4691]: E1124 08:07:09.050564 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\": container with ID starting with 59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693 not found: ID does not exist" containerID="59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.050611 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693"} err="failed to get container status \"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\": rpc error: code = NotFound desc = could not find container \"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\": container with ID starting with 59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.050640 4691 scope.go:117] "RemoveContainer" containerID="00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb" Nov 24 08:07:09 crc kubenswrapper[4691]: E1124 08:07:09.050919 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\": container with ID starting with 00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb not found: ID does not exist" containerID="00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.050954 4691 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb"} err="failed to get container status \"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\": rpc error: code = NotFound desc = could not find container \"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\": container with ID starting with 00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.050978 4691 scope.go:117] "RemoveContainer" containerID="cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.051428 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420"} err="failed to get container status \"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420\": rpc error: code = NotFound desc = could not find container \"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420\": container with ID starting with cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.051598 4691 scope.go:117] "RemoveContainer" containerID="62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.052136 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805"} err="failed to get container status \"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805\": rpc error: code = NotFound desc = could not find container \"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805\": container with ID starting with 62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.052167 4691 scope.go:117] "RemoveContainer" containerID="184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.052699 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907"} err="failed to get container status \"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\": rpc error: code = NotFound desc = could not find container \"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\": container with ID starting with 184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.052730 4691 scope.go:117] "RemoveContainer" containerID="d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.052990 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883"} err="failed to get container status \"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\": rpc error: code = NotFound desc = could not find container \"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\": container with ID starting with d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883 not found: ID does not exist" Nov 
24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.053019 4691 scope.go:117] "RemoveContainer" containerID="3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.053504 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539"} err="failed to get container status \"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\": rpc error: code = NotFound desc = could not find container \"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\": container with ID starting with 3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.053537 4691 scope.go:117] "RemoveContainer" containerID="7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.053997 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669"} err="failed to get container status \"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\": rpc error: code = NotFound desc = could not find container \"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\": container with ID starting with 7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.054027 4691 scope.go:117] "RemoveContainer" containerID="67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.054434 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf"} err="failed to get container status \"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\": rpc error: code = NotFound desc = could not find container \"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\": container with ID starting with 67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.054495 4691 scope.go:117] "RemoveContainer" containerID="7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.055097 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1"} err="failed to get container status \"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\": rpc error: code = NotFound desc = could not find container \"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\": container with ID starting with 7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.055126 4691 scope.go:117] "RemoveContainer" containerID="59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.055391 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693"} err="failed to get container status 
\"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\": rpc error: code = NotFound desc = could not find container \"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\": container with ID starting with 59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.055419 4691 scope.go:117] "RemoveContainer" containerID="00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.055711 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb"} err="failed to get container status \"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\": rpc error: code = NotFound desc = could not find container \"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\": container with ID starting with 00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.055736 4691 scope.go:117] "RemoveContainer" containerID="cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.056036 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420"} err="failed to get container status \"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420\": rpc error: code = NotFound desc = could not find container \"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420\": container with ID starting with cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.056063 4691 scope.go:117] "RemoveContainer" containerID="62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.056540 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805"} err="failed to get container status \"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805\": rpc error: code = NotFound desc = could not find container \"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805\": container with ID starting with 62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.056571 4691 scope.go:117] "RemoveContainer" containerID="184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.056890 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907"} err="failed to get container status \"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\": rpc error: code = NotFound desc = could not find container \"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\": container with ID starting with 184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.056920 4691 scope.go:117] "RemoveContainer" 
containerID="d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.057405 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883"} err="failed to get container status \"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\": rpc error: code = NotFound desc = could not find container \"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\": container with ID starting with d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.057435 4691 scope.go:117] "RemoveContainer" containerID="3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.057706 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539"} err="failed to get container status \"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\": rpc error: code = NotFound desc = could not find container \"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\": container with ID starting with 3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.057739 4691 scope.go:117] "RemoveContainer" containerID="7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.058031 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669"} err="failed to get container status \"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\": rpc error: code = NotFound desc = could not find container \"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\": container with ID starting with 7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.058065 4691 scope.go:117] "RemoveContainer" containerID="67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.058668 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf"} err="failed to get container status \"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\": rpc error: code = NotFound desc = could not find container \"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\": container with ID starting with 67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.058691 4691 scope.go:117] "RemoveContainer" containerID="7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.059140 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1"} err="failed to get container status \"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\": rpc error: code = NotFound desc = could not find 
container \"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\": container with ID starting with 7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.059399 4691 scope.go:117] "RemoveContainer" containerID="59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.059882 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693"} err="failed to get container status \"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\": rpc error: code = NotFound desc = could not find container \"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\": container with ID starting with 59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.059915 4691 scope.go:117] "RemoveContainer" containerID="00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.060491 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb"} err="failed to get container status \"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\": rpc error: code = NotFound desc = could not find container \"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\": container with ID starting with 00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.060509 4691 scope.go:117] "RemoveContainer" containerID="cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.060790 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420"} err="failed to get container status \"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420\": rpc error: code = NotFound desc = could not find container \"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420\": container with ID starting with cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.060919 4691 scope.go:117] "RemoveContainer" containerID="62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.061406 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805"} err="failed to get container status \"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805\": rpc error: code = NotFound desc = could not find container \"62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805\": container with ID starting with 62f9fb0e1ed54796357e343a933374f7aa6b3f8425673dab48cdd0da573ca805 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.061426 4691 scope.go:117] "RemoveContainer" containerID="184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.061743 4691 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907"} err="failed to get container status \"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\": rpc error: code = NotFound desc = could not find container \"184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907\": container with ID starting with 184c6af6fb8ea1e5e3cee8fe432bedf451eac0e54418612553da6946984b8907 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.061763 4691 scope.go:117] "RemoveContainer" containerID="d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.062018 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883"} err="failed to get container status \"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\": rpc error: code = NotFound desc = could not find container \"d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883\": container with ID starting with d591f8e2c7373479953a858201fc803fe1b6d8629ce6fbf4e5c7360af27e9883 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.062140 4691 scope.go:117] "RemoveContainer" containerID="3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.062530 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539"} err="failed to get container status \"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\": rpc error: code = NotFound desc = could not find container \"3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539\": container with ID starting with 3d9b8c0d41a85a005e2e4350067d3487bac7b6245a4021ee98739d0623dda539 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.062549 4691 scope.go:117] "RemoveContainer" containerID="7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.062768 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669"} err="failed to get container status \"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\": rpc error: code = NotFound desc = could not find container \"7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669\": container with ID starting with 7c23c990f02d5c23fe27eb3cce222759774566b1ea98ba4b8f5f2dc979110669 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.062807 4691 scope.go:117] "RemoveContainer" containerID="67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.063077 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf"} err="failed to get container status \"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\": rpc error: code = NotFound desc = could not find container \"67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf\": container with ID starting with 
67a1ae04212e3d5d061db66411d150e6f08fb94fb43d32a60769edfcaca70fdf not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.063094 4691 scope.go:117] "RemoveContainer" containerID="7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.063377 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1"} err="failed to get container status \"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\": rpc error: code = NotFound desc = could not find container \"7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1\": container with ID starting with 7a697fffa53d9b7ca313263f5021b85ab892cb69d96ed6be3f815ca170eb66d1 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.063395 4691 scope.go:117] "RemoveContainer" containerID="59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.063821 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693"} err="failed to get container status \"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\": rpc error: code = NotFound desc = could not find container \"59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693\": container with ID starting with 59b93a39962afe32fb9b15843dd8f38ea59f69dd4a067f0f0039d52526282693 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.063957 4691 scope.go:117] "RemoveContainer" containerID="00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.064471 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb"} err="failed to get container status \"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\": rpc error: code = NotFound desc = could not find container \"00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb\": container with ID starting with 00289592b6869fdc4fe25570e7c980094d6ab38e32e223144015dad2cbdfe6eb not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.064608 4691 scope.go:117] "RemoveContainer" containerID="cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.065157 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420"} err="failed to get container status \"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420\": rpc error: code = NotFound desc = could not find container \"cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420\": container with ID starting with cdc131b9fe5a8eab1cec9f59da136cedae8ae0152973d03793ee13d92b3de420 not found: ID does not exist" Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.821356 4691 generic.go:334] "Generic (PLEG): container finished" podID="59d910cd-27f4-4695-ae89-c36b07cafbf3" containerID="11660147d5e3ec9ca1b544dfd47d95ba502600b4f20f7bf9d707d4466c38b078" exitCode=0 Nov 24 08:07:09 crc kubenswrapper[4691]: I1124 08:07:09.821707 4691 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" event={"ID":"59d910cd-27f4-4695-ae89-c36b07cafbf3","Type":"ContainerDied","Data":"11660147d5e3ec9ca1b544dfd47d95ba502600b4f20f7bf9d707d4466c38b078"} Nov 24 08:07:10 crc kubenswrapper[4691]: I1124 08:07:10.769042 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="106a6e78-a004-4232-a0a2-efecf2f7c248" path="/var/lib/kubelet/pods/106a6e78-a004-4232-a0a2-efecf2f7c248/volumes" Nov 24 08:07:10 crc kubenswrapper[4691]: I1124 08:07:10.836398 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" event={"ID":"59d910cd-27f4-4695-ae89-c36b07cafbf3","Type":"ContainerStarted","Data":"f6dfcbcfc26783cf618985c9ef75f9f7107cd273054896f41c9c051008529687"} Nov 24 08:07:10 crc kubenswrapper[4691]: I1124 08:07:10.836487 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" event={"ID":"59d910cd-27f4-4695-ae89-c36b07cafbf3","Type":"ContainerStarted","Data":"21f01bb5f1f0279ebae0c096fa3c4515e06ca69d72b3c9e3e87cdcd31fcd8b8c"} Nov 24 08:07:10 crc kubenswrapper[4691]: I1124 08:07:10.836508 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" event={"ID":"59d910cd-27f4-4695-ae89-c36b07cafbf3","Type":"ContainerStarted","Data":"5a0a82c485312cb0a424618390219e61b503d0f425d7998d4372614d1fe83bf6"} Nov 24 08:07:10 crc kubenswrapper[4691]: I1124 08:07:10.836522 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" event={"ID":"59d910cd-27f4-4695-ae89-c36b07cafbf3","Type":"ContainerStarted","Data":"4f352addc0a5951ec80e14660f1f19fa922b07bd8abaab06cebad9f155f37e20"} Nov 24 08:07:10 crc kubenswrapper[4691]: I1124 08:07:10.836537 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" event={"ID":"59d910cd-27f4-4695-ae89-c36b07cafbf3","Type":"ContainerStarted","Data":"d373e70e5666fef61ee0744700a1e4583c320a60b218c893fd80b5fd93497ff6"} Nov 24 08:07:10 crc kubenswrapper[4691]: I1124 08:07:10.836550 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" event={"ID":"59d910cd-27f4-4695-ae89-c36b07cafbf3","Type":"ContainerStarted","Data":"53b24505cc9618d4d801322352e075ba800cf036ee1c68708233d11e97d242da"} Nov 24 08:07:13 crc kubenswrapper[4691]: I1124 08:07:13.877351 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" event={"ID":"59d910cd-27f4-4695-ae89-c36b07cafbf3","Type":"ContainerStarted","Data":"4eebf4648cc1dd0892f3486705b380fc3b776a3c1f048eb673f11597a4adc119"} Nov 24 08:07:15 crc kubenswrapper[4691]: I1124 08:07:15.901741 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" event={"ID":"59d910cd-27f4-4695-ae89-c36b07cafbf3","Type":"ContainerStarted","Data":"1eec12d467a76ecfd361a33bad970785b9b15ea3dce7a8819b40e6000aba196e"} Nov 24 08:07:15 crc kubenswrapper[4691]: I1124 08:07:15.902612 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:15 crc kubenswrapper[4691]: I1124 08:07:15.902631 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:15 crc kubenswrapper[4691]: I1124 08:07:15.902645 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:15 crc kubenswrapper[4691]: I1124 08:07:15.941650 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:15 crc kubenswrapper[4691]: I1124 08:07:15.942373 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:15 crc kubenswrapper[4691]: I1124 08:07:15.972649 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" podStartSLOduration=7.972628686 podStartE2EDuration="7.972628686s" podCreationTimestamp="2025-11-24 08:07:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:07:15.968493706 +0000 UTC m=+597.967442965" watchObservedRunningTime="2025-11-24 08:07:15.972628686 +0000 UTC m=+597.971577935" Nov 24 08:07:19 crc kubenswrapper[4691]: I1124 08:07:19.000572 4691 scope.go:117] "RemoveContainer" containerID="ce04c23a0bfb4dd3ca78ba9c16fc588942c0ac0c32675e14b8ac96ad61f32c10" Nov 24 08:07:19 crc kubenswrapper[4691]: I1124 08:07:19.033616 4691 scope.go:117] "RemoveContainer" containerID="9ec1c7e8cb1e60b48c17bb0941fbe1535d397249f2ca05dd1af653a169aa8612" Nov 24 08:07:19 crc kubenswrapper[4691]: I1124 08:07:19.073597 4691 scope.go:117] "RemoveContainer" containerID="d2fe7e8123aeaf275afa9ad3f208b5c39433b15d906dbad0c06cd75f8a8183aa" Nov 24 08:07:19 crc kubenswrapper[4691]: I1124 08:07:19.761027 4691 scope.go:117] "RemoveContainer" containerID="b88a5444a724c9be6f939634f2ae4dedc6fb1554307eb43642e1e6350e8cc201" Nov 24 08:07:19 crc kubenswrapper[4691]: E1124 08:07:19.761243 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-gxxrf_openshift-multus(b2332a73-f85c-470c-9209-c5e5cd1bc3a1)\"" pod="openshift-multus/multus-gxxrf" podUID="b2332a73-f85c-470c-9209-c5e5cd1bc3a1" Nov 24 08:07:19 crc kubenswrapper[4691]: I1124 08:07:19.930007 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gxxrf_b2332a73-f85c-470c-9209-c5e5cd1bc3a1/kube-multus/2.log" Nov 24 08:07:21 crc kubenswrapper[4691]: I1124 08:07:21.089267 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:07:21 crc kubenswrapper[4691]: I1124 08:07:21.089375 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:07:21 crc kubenswrapper[4691]: I1124 08:07:21.089478 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:07:21 crc kubenswrapper[4691]: I1124 08:07:21.090126 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"2f66d4866e1d53d3e1351796c2aebb19bcfe0badbd5b9c37eb4d97650922dfd8"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 08:07:21 crc kubenswrapper[4691]: I1124 08:07:21.090186 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://2f66d4866e1d53d3e1351796c2aebb19bcfe0badbd5b9c37eb4d97650922dfd8" gracePeriod=600 Nov 24 08:07:21 crc kubenswrapper[4691]: I1124 08:07:21.946131 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="2f66d4866e1d53d3e1351796c2aebb19bcfe0badbd5b9c37eb4d97650922dfd8" exitCode=0 Nov 24 08:07:21 crc kubenswrapper[4691]: I1124 08:07:21.946231 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"2f66d4866e1d53d3e1351796c2aebb19bcfe0badbd5b9c37eb4d97650922dfd8"} Nov 24 08:07:21 crc kubenswrapper[4691]: I1124 08:07:21.946912 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"4adbbde14ca91fb132e770900c2c7d789c1a43897b472649dcf3666cd980576b"} Nov 24 08:07:21 crc kubenswrapper[4691]: I1124 08:07:21.946944 4691 scope.go:117] "RemoveContainer" containerID="2696471643d2e0fe14b54a335aee3091d21a0ad84005def235cb124eca7c95b3" Nov 24 08:07:34 crc kubenswrapper[4691]: I1124 08:07:34.761604 4691 scope.go:117] "RemoveContainer" containerID="b88a5444a724c9be6f939634f2ae4dedc6fb1554307eb43642e1e6350e8cc201" Nov 24 08:07:35 crc kubenswrapper[4691]: I1124 08:07:35.040297 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gxxrf_b2332a73-f85c-470c-9209-c5e5cd1bc3a1/kube-multus/2.log" Nov 24 08:07:35 crc kubenswrapper[4691]: I1124 08:07:35.040856 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gxxrf" event={"ID":"b2332a73-f85c-470c-9209-c5e5cd1bc3a1","Type":"ContainerStarted","Data":"fc4f5a178489fd32b2ecde23101e23038e94ad3a116b12c2366683c10127fc83"} Nov 24 08:07:38 crc kubenswrapper[4691]: I1124 08:07:38.778619 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-h7pwp" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.232545 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l"] Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.234099 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.236164 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.249319 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l"] Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.370504 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.370567 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.370607 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6mzc\" (UniqueName: \"kubernetes.io/projected/05d371c2-4e47-4da1-bfc9-b53160c5d377-kube-api-access-l6mzc\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.472657 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.472751 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.472831 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6mzc\" (UniqueName: \"kubernetes.io/projected/05d371c2-4e47-4da1-bfc9-b53160c5d377-kube-api-access-l6mzc\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.473326 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.473529 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.502065 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6mzc\" (UniqueName: \"kubernetes.io/projected/05d371c2-4e47-4da1-bfc9-b53160c5d377-kube-api-access-l6mzc\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.551933 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:47 crc kubenswrapper[4691]: I1124 08:07:47.802599 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l"] Nov 24 08:07:48 crc kubenswrapper[4691]: I1124 08:07:48.124243 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" event={"ID":"05d371c2-4e47-4da1-bfc9-b53160c5d377","Type":"ContainerStarted","Data":"3d0a3e4355b0fdca67ec7249f414ee32b22f72526a812042ce29544699c8080e"} Nov 24 08:07:48 crc kubenswrapper[4691]: I1124 08:07:48.124769 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" event={"ID":"05d371c2-4e47-4da1-bfc9-b53160c5d377","Type":"ContainerStarted","Data":"3efa7d7de24edb9418c47a38493a2d2d9aff0f53ac64195ae3dc27ecb14e1574"} Nov 24 08:07:49 crc kubenswrapper[4691]: I1124 08:07:49.133740 4691 generic.go:334] "Generic (PLEG): container finished" podID="05d371c2-4e47-4da1-bfc9-b53160c5d377" containerID="3d0a3e4355b0fdca67ec7249f414ee32b22f72526a812042ce29544699c8080e" exitCode=0 Nov 24 08:07:49 crc kubenswrapper[4691]: I1124 08:07:49.133828 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" event={"ID":"05d371c2-4e47-4da1-bfc9-b53160c5d377","Type":"ContainerDied","Data":"3d0a3e4355b0fdca67ec7249f414ee32b22f72526a812042ce29544699c8080e"} Nov 24 08:07:51 crc kubenswrapper[4691]: I1124 08:07:51.151639 4691 generic.go:334] "Generic (PLEG): container finished" podID="05d371c2-4e47-4da1-bfc9-b53160c5d377" containerID="2686e275b18cd99b9d1cd69c440ece779962f979c1fc9d98af125134bbee8221" exitCode=0 Nov 24 08:07:51 crc kubenswrapper[4691]: I1124 08:07:51.151748 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" 
event={"ID":"05d371c2-4e47-4da1-bfc9-b53160c5d377","Type":"ContainerDied","Data":"2686e275b18cd99b9d1cd69c440ece779962f979c1fc9d98af125134bbee8221"} Nov 24 08:07:52 crc kubenswrapper[4691]: I1124 08:07:52.163796 4691 generic.go:334] "Generic (PLEG): container finished" podID="05d371c2-4e47-4da1-bfc9-b53160c5d377" containerID="4fc9b76f8beed24f84414c70b2e53b5c8616008e4afb7ed905174189c8df50e1" exitCode=0 Nov 24 08:07:52 crc kubenswrapper[4691]: I1124 08:07:52.163869 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" event={"ID":"05d371c2-4e47-4da1-bfc9-b53160c5d377","Type":"ContainerDied","Data":"4fc9b76f8beed24f84414c70b2e53b5c8616008e4afb7ed905174189c8df50e1"} Nov 24 08:07:53 crc kubenswrapper[4691]: I1124 08:07:53.534951 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:53 crc kubenswrapper[4691]: I1124 08:07:53.659876 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6mzc\" (UniqueName: \"kubernetes.io/projected/05d371c2-4e47-4da1-bfc9-b53160c5d377-kube-api-access-l6mzc\") pod \"05d371c2-4e47-4da1-bfc9-b53160c5d377\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " Nov 24 08:07:53 crc kubenswrapper[4691]: I1124 08:07:53.659961 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-util\") pod \"05d371c2-4e47-4da1-bfc9-b53160c5d377\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " Nov 24 08:07:53 crc kubenswrapper[4691]: I1124 08:07:53.660116 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-bundle\") pod \"05d371c2-4e47-4da1-bfc9-b53160c5d377\" (UID: \"05d371c2-4e47-4da1-bfc9-b53160c5d377\") " Nov 24 08:07:53 crc kubenswrapper[4691]: I1124 08:07:53.661757 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-bundle" (OuterVolumeSpecName: "bundle") pod "05d371c2-4e47-4da1-bfc9-b53160c5d377" (UID: "05d371c2-4e47-4da1-bfc9-b53160c5d377"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:07:53 crc kubenswrapper[4691]: I1124 08:07:53.666911 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05d371c2-4e47-4da1-bfc9-b53160c5d377-kube-api-access-l6mzc" (OuterVolumeSpecName: "kube-api-access-l6mzc") pod "05d371c2-4e47-4da1-bfc9-b53160c5d377" (UID: "05d371c2-4e47-4da1-bfc9-b53160c5d377"). InnerVolumeSpecName "kube-api-access-l6mzc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:07:53 crc kubenswrapper[4691]: I1124 08:07:53.679861 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-util" (OuterVolumeSpecName: "util") pod "05d371c2-4e47-4da1-bfc9-b53160c5d377" (UID: "05d371c2-4e47-4da1-bfc9-b53160c5d377"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:07:53 crc kubenswrapper[4691]: I1124 08:07:53.761905 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6mzc\" (UniqueName: \"kubernetes.io/projected/05d371c2-4e47-4da1-bfc9-b53160c5d377-kube-api-access-l6mzc\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:53 crc kubenswrapper[4691]: I1124 08:07:53.761974 4691 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-util\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:53 crc kubenswrapper[4691]: I1124 08:07:53.762003 4691 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/05d371c2-4e47-4da1-bfc9-b53160c5d377-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:07:54 crc kubenswrapper[4691]: I1124 08:07:54.179815 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" event={"ID":"05d371c2-4e47-4da1-bfc9-b53160c5d377","Type":"ContainerDied","Data":"3efa7d7de24edb9418c47a38493a2d2d9aff0f53ac64195ae3dc27ecb14e1574"} Nov 24 08:07:54 crc kubenswrapper[4691]: I1124 08:07:54.180250 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3efa7d7de24edb9418c47a38493a2d2d9aff0f53ac64195ae3dc27ecb14e1574" Nov 24 08:07:54 crc kubenswrapper[4691]: I1124 08:07:54.180137 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.126418 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5fxr5"] Nov 24 08:07:56 crc kubenswrapper[4691]: E1124 08:07:56.128742 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05d371c2-4e47-4da1-bfc9-b53160c5d377" containerName="util" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.128839 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="05d371c2-4e47-4da1-bfc9-b53160c5d377" containerName="util" Nov 24 08:07:56 crc kubenswrapper[4691]: E1124 08:07:56.128910 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05d371c2-4e47-4da1-bfc9-b53160c5d377" containerName="pull" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.128960 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="05d371c2-4e47-4da1-bfc9-b53160c5d377" containerName="pull" Nov 24 08:07:56 crc kubenswrapper[4691]: E1124 08:07:56.129012 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05d371c2-4e47-4da1-bfc9-b53160c5d377" containerName="extract" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.129064 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="05d371c2-4e47-4da1-bfc9-b53160c5d377" containerName="extract" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.129237 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="05d371c2-4e47-4da1-bfc9-b53160c5d377" containerName="extract" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.129826 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-5fxr5" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.131976 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.132650 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-2dlcn" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.132758 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.144493 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5fxr5"] Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.305387 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9zl9\" (UniqueName: \"kubernetes.io/projected/cd1dce5a-f168-4208-879b-f4132bf30307-kube-api-access-s9zl9\") pod \"nmstate-operator-557fdffb88-5fxr5\" (UID: \"cd1dce5a-f168-4208-879b-f4132bf30307\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5fxr5" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.407196 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9zl9\" (UniqueName: \"kubernetes.io/projected/cd1dce5a-f168-4208-879b-f4132bf30307-kube-api-access-s9zl9\") pod \"nmstate-operator-557fdffb88-5fxr5\" (UID: \"cd1dce5a-f168-4208-879b-f4132bf30307\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5fxr5" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.441434 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9zl9\" (UniqueName: \"kubernetes.io/projected/cd1dce5a-f168-4208-879b-f4132bf30307-kube-api-access-s9zl9\") pod \"nmstate-operator-557fdffb88-5fxr5\" (UID: \"cd1dce5a-f168-4208-879b-f4132bf30307\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5fxr5" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.479601 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-5fxr5" Nov 24 08:07:56 crc kubenswrapper[4691]: I1124 08:07:56.693324 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5fxr5"] Nov 24 08:07:57 crc kubenswrapper[4691]: I1124 08:07:57.201875 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-5fxr5" event={"ID":"cd1dce5a-f168-4208-879b-f4132bf30307","Type":"ContainerStarted","Data":"89e48a13579bff07c318232e309cb48f8b1758d9dbf9adc46070e1abe218f974"} Nov 24 08:07:59 crc kubenswrapper[4691]: I1124 08:07:59.217313 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-5fxr5" event={"ID":"cd1dce5a-f168-4208-879b-f4132bf30307","Type":"ContainerStarted","Data":"5cdb7c7e729ea5191ddd1b4d557bcfd66a7b71dc82528cb2759d4cbb2553dac5"} Nov 24 08:07:59 crc kubenswrapper[4691]: I1124 08:07:59.234748 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-5fxr5" podStartSLOduration=0.997371949 podStartE2EDuration="3.234724951s" podCreationTimestamp="2025-11-24 08:07:56 +0000 UTC" firstStartedPulling="2025-11-24 08:07:56.703004686 +0000 UTC m=+638.701953935" lastFinishedPulling="2025-11-24 08:07:58.940357688 +0000 UTC m=+640.939306937" observedRunningTime="2025-11-24 08:07:59.232231999 +0000 UTC m=+641.231181248" watchObservedRunningTime="2025-11-24 08:07:59.234724951 +0000 UTC m=+641.233674200" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.142033 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj"] Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.142975 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.148207 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-sqftz" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.157357 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj"] Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.165009 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67lmm\" (UniqueName: \"kubernetes.io/projected/6fc26c17-4027-42aa-821e-b3e5c1f92226-kube-api-access-67lmm\") pod \"nmstate-metrics-5dcf9c57c5-fjpwj\" (UID: \"6fc26c17-4027-42aa-821e-b3e5c1f92226\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.173835 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b"] Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.174589 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.177413 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.193828 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b"] Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.202105 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-sk5rc"] Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.202833 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.265968 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/17c32358-060b-4f32-abec-0eac2e40eca1-nmstate-lock\") pod \"nmstate-handler-sk5rc\" (UID: \"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.266003 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/17c32358-060b-4f32-abec-0eac2e40eca1-ovs-socket\") pod \"nmstate-handler-sk5rc\" (UID: \"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.266031 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt5gx\" (UniqueName: \"kubernetes.io/projected/17c32358-060b-4f32-abec-0eac2e40eca1-kube-api-access-qt5gx\") pod \"nmstate-handler-sk5rc\" (UID: \"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.266068 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qgpn\" (UniqueName: \"kubernetes.io/projected/d877fe85-0260-4e8f-89c9-ad96a8466bee-kube-api-access-4qgpn\") pod \"nmstate-webhook-6b89b748d8-xq22b\" (UID: \"d877fe85-0260-4e8f-89c9-ad96a8466bee\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.266104 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d877fe85-0260-4e8f-89c9-ad96a8466bee-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-xq22b\" (UID: \"d877fe85-0260-4e8f-89c9-ad96a8466bee\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.266126 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/17c32358-060b-4f32-abec-0eac2e40eca1-dbus-socket\") pod \"nmstate-handler-sk5rc\" (UID: \"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.266149 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67lmm\" (UniqueName: \"kubernetes.io/projected/6fc26c17-4027-42aa-821e-b3e5c1f92226-kube-api-access-67lmm\") pod \"nmstate-metrics-5dcf9c57c5-fjpwj\" 
(UID: \"6fc26c17-4027-42aa-821e-b3e5c1f92226\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.292051 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67lmm\" (UniqueName: \"kubernetes.io/projected/6fc26c17-4027-42aa-821e-b3e5c1f92226-kube-api-access-67lmm\") pod \"nmstate-metrics-5dcf9c57c5-fjpwj\" (UID: \"6fc26c17-4027-42aa-821e-b3e5c1f92226\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.317999 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6"] Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.318834 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.320383 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-czmkf" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.320756 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.321770 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.350129 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6"] Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367063 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4flm8\" (UniqueName: \"kubernetes.io/projected/8a7fc372-f01f-497b-b1bd-c508371d6069-kube-api-access-4flm8\") pod \"nmstate-console-plugin-5874bd7bc5-nrlp6\" (UID: \"8a7fc372-f01f-497b-b1bd-c508371d6069\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367127 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d877fe85-0260-4e8f-89c9-ad96a8466bee-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-xq22b\" (UID: \"d877fe85-0260-4e8f-89c9-ad96a8466bee\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367153 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/8a7fc372-f01f-497b-b1bd-c508371d6069-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-nrlp6\" (UID: \"8a7fc372-f01f-497b-b1bd-c508371d6069\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367176 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/17c32358-060b-4f32-abec-0eac2e40eca1-dbus-socket\") pod \"nmstate-handler-sk5rc\" (UID: \"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367288 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/17c32358-060b-4f32-abec-0eac2e40eca1-nmstate-lock\") pod \"nmstate-handler-sk5rc\" (UID: 
\"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367333 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/17c32358-060b-4f32-abec-0eac2e40eca1-ovs-socket\") pod \"nmstate-handler-sk5rc\" (UID: \"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367370 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/8a7fc372-f01f-497b-b1bd-c508371d6069-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-nrlp6\" (UID: \"8a7fc372-f01f-497b-b1bd-c508371d6069\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367440 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt5gx\" (UniqueName: \"kubernetes.io/projected/17c32358-060b-4f32-abec-0eac2e40eca1-kube-api-access-qt5gx\") pod \"nmstate-handler-sk5rc\" (UID: \"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367479 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/17c32358-060b-4f32-abec-0eac2e40eca1-ovs-socket\") pod \"nmstate-handler-sk5rc\" (UID: \"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367520 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qgpn\" (UniqueName: \"kubernetes.io/projected/d877fe85-0260-4e8f-89c9-ad96a8466bee-kube-api-access-4qgpn\") pod \"nmstate-webhook-6b89b748d8-xq22b\" (UID: \"d877fe85-0260-4e8f-89c9-ad96a8466bee\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367432 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/17c32358-060b-4f32-abec-0eac2e40eca1-nmstate-lock\") pod \"nmstate-handler-sk5rc\" (UID: \"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.367520 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/17c32358-060b-4f32-abec-0eac2e40eca1-dbus-socket\") pod \"nmstate-handler-sk5rc\" (UID: \"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.385888 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d877fe85-0260-4e8f-89c9-ad96a8466bee-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-xq22b\" (UID: \"d877fe85-0260-4e8f-89c9-ad96a8466bee\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.386976 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt5gx\" (UniqueName: \"kubernetes.io/projected/17c32358-060b-4f32-abec-0eac2e40eca1-kube-api-access-qt5gx\") pod \"nmstate-handler-sk5rc\" (UID: 
\"17c32358-060b-4f32-abec-0eac2e40eca1\") " pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.389336 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qgpn\" (UniqueName: \"kubernetes.io/projected/d877fe85-0260-4e8f-89c9-ad96a8466bee-kube-api-access-4qgpn\") pod \"nmstate-webhook-6b89b748d8-xq22b\" (UID: \"d877fe85-0260-4e8f-89c9-ad96a8466bee\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.458183 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.468178 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/8a7fc372-f01f-497b-b1bd-c508371d6069-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-nrlp6\" (UID: \"8a7fc372-f01f-497b-b1bd-c508371d6069\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.468270 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4flm8\" (UniqueName: \"kubernetes.io/projected/8a7fc372-f01f-497b-b1bd-c508371d6069-kube-api-access-4flm8\") pod \"nmstate-console-plugin-5874bd7bc5-nrlp6\" (UID: \"8a7fc372-f01f-497b-b1bd-c508371d6069\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.468319 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/8a7fc372-f01f-497b-b1bd-c508371d6069-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-nrlp6\" (UID: \"8a7fc372-f01f-497b-b1bd-c508371d6069\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:00 crc kubenswrapper[4691]: E1124 08:08:00.468389 4691 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 24 08:08:00 crc kubenswrapper[4691]: E1124 08:08:00.468501 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8a7fc372-f01f-497b-b1bd-c508371d6069-plugin-serving-cert podName:8a7fc372-f01f-497b-b1bd-c508371d6069 nodeName:}" failed. No retries permitted until 2025-11-24 08:08:00.968478531 +0000 UTC m=+642.967427780 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/8a7fc372-f01f-497b-b1bd-c508371d6069-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-nrlp6" (UID: "8a7fc372-f01f-497b-b1bd-c508371d6069") : secret "plugin-serving-cert" not found Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.469328 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/8a7fc372-f01f-497b-b1bd-c508371d6069-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-nrlp6\" (UID: \"8a7fc372-f01f-497b-b1bd-c508371d6069\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.499912 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.502230 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4flm8\" (UniqueName: \"kubernetes.io/projected/8a7fc372-f01f-497b-b1bd-c508371d6069-kube-api-access-4flm8\") pod \"nmstate-console-plugin-5874bd7bc5-nrlp6\" (UID: \"8a7fc372-f01f-497b-b1bd-c508371d6069\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.519969 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.627512 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7cbf967b4c-4mwqx"] Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.628570 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.655468 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7cbf967b4c-4mwqx"] Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.774662 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/04fb5688-abae-4685-a42f-49e1adb8a1b2-console-serving-cert\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.775094 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-trusted-ca-bundle\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.775131 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kxc5\" (UniqueName: \"kubernetes.io/projected/04fb5688-abae-4685-a42f-49e1adb8a1b2-kube-api-access-6kxc5\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.775164 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-oauth-serving-cert\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.775193 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-console-config\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.775233 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-service-ca\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.775271 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/04fb5688-abae-4685-a42f-49e1adb8a1b2-console-oauth-config\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.820373 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj"] Nov 24 08:08:00 crc kubenswrapper[4691]: W1124 08:08:00.823878 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6fc26c17_4027_42aa_821e_b3e5c1f92226.slice/crio-bb875156dfad4ce29ec61ed56cd9a05b5123504b640b3daa98675a925db9b658 WatchSource:0}: Error finding container bb875156dfad4ce29ec61ed56cd9a05b5123504b640b3daa98675a925db9b658: Status 404 returned error can't find the container with id bb875156dfad4ce29ec61ed56cd9a05b5123504b640b3daa98675a925db9b658 Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.876253 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/04fb5688-abae-4685-a42f-49e1adb8a1b2-console-serving-cert\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.876315 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-trusted-ca-bundle\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.876355 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kxc5\" (UniqueName: \"kubernetes.io/projected/04fb5688-abae-4685-a42f-49e1adb8a1b2-kube-api-access-6kxc5\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.876392 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-oauth-serving-cert\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.876491 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-console-config\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.876543 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-service-ca\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.876606 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/04fb5688-abae-4685-a42f-49e1adb8a1b2-console-oauth-config\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.876871 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b"] Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.879735 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-console-config\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.879816 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-service-ca\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.879837 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-oauth-serving-cert\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.879946 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/04fb5688-abae-4685-a42f-49e1adb8a1b2-trusted-ca-bundle\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.881803 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/04fb5688-abae-4685-a42f-49e1adb8a1b2-console-serving-cert\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.882320 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/04fb5688-abae-4685-a42f-49e1adb8a1b2-console-oauth-config\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.894104 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kxc5\" (UniqueName: \"kubernetes.io/projected/04fb5688-abae-4685-a42f-49e1adb8a1b2-kube-api-access-6kxc5\") pod \"console-7cbf967b4c-4mwqx\" (UID: \"04fb5688-abae-4685-a42f-49e1adb8a1b2\") " pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.966673 
4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.977749 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/8a7fc372-f01f-497b-b1bd-c508371d6069-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-nrlp6\" (UID: \"8a7fc372-f01f-497b-b1bd-c508371d6069\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:00 crc kubenswrapper[4691]: I1124 08:08:00.981946 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/8a7fc372-f01f-497b-b1bd-c508371d6069-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-nrlp6\" (UID: \"8a7fc372-f01f-497b-b1bd-c508371d6069\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:01 crc kubenswrapper[4691]: I1124 08:08:01.173133 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7cbf967b4c-4mwqx"] Nov 24 08:08:01 crc kubenswrapper[4691]: W1124 08:08:01.187126 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod04fb5688_abae_4685_a42f_49e1adb8a1b2.slice/crio-99876b494d134b3be7f44cad7c1702141de4e6c617140a8a52ab2f94be23ca74 WatchSource:0}: Error finding container 99876b494d134b3be7f44cad7c1702141de4e6c617140a8a52ab2f94be23ca74: Status 404 returned error can't find the container with id 99876b494d134b3be7f44cad7c1702141de4e6c617140a8a52ab2f94be23ca74 Nov 24 08:08:01 crc kubenswrapper[4691]: I1124 08:08:01.239300 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" Nov 24 08:08:01 crc kubenswrapper[4691]: I1124 08:08:01.240958 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7cbf967b4c-4mwqx" event={"ID":"04fb5688-abae-4685-a42f-49e1adb8a1b2","Type":"ContainerStarted","Data":"99876b494d134b3be7f44cad7c1702141de4e6c617140a8a52ab2f94be23ca74"} Nov 24 08:08:01 crc kubenswrapper[4691]: I1124 08:08:01.243121 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj" event={"ID":"6fc26c17-4027-42aa-821e-b3e5c1f92226","Type":"ContainerStarted","Data":"bb875156dfad4ce29ec61ed56cd9a05b5123504b640b3daa98675a925db9b658"} Nov 24 08:08:01 crc kubenswrapper[4691]: I1124 08:08:01.246843 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-sk5rc" event={"ID":"17c32358-060b-4f32-abec-0eac2e40eca1","Type":"ContainerStarted","Data":"c0cdb12e6380c3308db4bff875b10570cc2d50d0fd5efc0e928fc167be5d7ffb"} Nov 24 08:08:01 crc kubenswrapper[4691]: I1124 08:08:01.247984 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" event={"ID":"d877fe85-0260-4e8f-89c9-ad96a8466bee","Type":"ContainerStarted","Data":"ec76e5a10dd15cd9ad2b676d0b9ccd39f83afcc26c670595f89026cbe07c25e8"} Nov 24 08:08:01 crc kubenswrapper[4691]: I1124 08:08:01.441417 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6"] Nov 24 08:08:01 crc kubenswrapper[4691]: W1124 08:08:01.449199 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a7fc372_f01f_497b_b1bd_c508371d6069.slice/crio-4c0deaea31d79c5bc32519baba99cb5120b3a4f72163ef840c78ebabaaa728ad WatchSource:0}: Error finding container 4c0deaea31d79c5bc32519baba99cb5120b3a4f72163ef840c78ebabaaa728ad: Status 404 returned error can't find the container with id 4c0deaea31d79c5bc32519baba99cb5120b3a4f72163ef840c78ebabaaa728ad Nov 24 08:08:02 crc kubenswrapper[4691]: I1124 08:08:02.258504 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7cbf967b4c-4mwqx" event={"ID":"04fb5688-abae-4685-a42f-49e1adb8a1b2","Type":"ContainerStarted","Data":"1cb40f7250e6d2ef6be05d202a9d19c535f0c5ad727951ec1545c5ec3e92bed2"} Nov 24 08:08:02 crc kubenswrapper[4691]: I1124 08:08:02.261439 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" event={"ID":"8a7fc372-f01f-497b-b1bd-c508371d6069","Type":"ContainerStarted","Data":"4c0deaea31d79c5bc32519baba99cb5120b3a4f72163ef840c78ebabaaa728ad"} Nov 24 08:08:02 crc kubenswrapper[4691]: I1124 08:08:02.282308 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7cbf967b4c-4mwqx" podStartSLOduration=2.282274712 podStartE2EDuration="2.282274712s" podCreationTimestamp="2025-11-24 08:08:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:08:02.277347999 +0000 UTC m=+644.276297248" watchObservedRunningTime="2025-11-24 08:08:02.282274712 +0000 UTC m=+644.281223961" Nov 24 08:08:03 crc kubenswrapper[4691]: I1124 08:08:03.271327 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" 
event={"ID":"d877fe85-0260-4e8f-89c9-ad96a8466bee","Type":"ContainerStarted","Data":"ba4f689a85dca472c6e66e6f1c432e3570e3d7d55c316103f6ba9969c467d223"} Nov 24 08:08:03 crc kubenswrapper[4691]: I1124 08:08:03.271758 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" Nov 24 08:08:03 crc kubenswrapper[4691]: I1124 08:08:03.274085 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj" event={"ID":"6fc26c17-4027-42aa-821e-b3e5c1f92226","Type":"ContainerStarted","Data":"3ee914fb4778248b17f01209579b5e3078c136ee150da46363f852d5e192ffb2"} Nov 24 08:08:03 crc kubenswrapper[4691]: I1124 08:08:03.290622 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" podStartSLOduration=1.162424267 podStartE2EDuration="3.290604014s" podCreationTimestamp="2025-11-24 08:08:00 +0000 UTC" firstStartedPulling="2025-11-24 08:08:00.885826411 +0000 UTC m=+642.884775660" lastFinishedPulling="2025-11-24 08:08:03.014006158 +0000 UTC m=+645.012955407" observedRunningTime="2025-11-24 08:08:03.289692398 +0000 UTC m=+645.288641647" watchObservedRunningTime="2025-11-24 08:08:03.290604014 +0000 UTC m=+645.289553263" Nov 24 08:08:04 crc kubenswrapper[4691]: I1124 08:08:04.285326 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-sk5rc" event={"ID":"17c32358-060b-4f32-abec-0eac2e40eca1","Type":"ContainerStarted","Data":"b34b9524f1455dda73b4a6c42d98ebca8ccd71341890a8b0a174aa0893161221"} Nov 24 08:08:04 crc kubenswrapper[4691]: I1124 08:08:04.285883 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:04 crc kubenswrapper[4691]: I1124 08:08:04.288722 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" event={"ID":"8a7fc372-f01f-497b-b1bd-c508371d6069","Type":"ContainerStarted","Data":"19980e63c83bad46a626eb4d330c01350da0552e70d8c1965848ad3e48c03d3b"} Nov 24 08:08:04 crc kubenswrapper[4691]: I1124 08:08:04.311846 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-sk5rc" podStartSLOduration=1.870067753 podStartE2EDuration="4.311823691s" podCreationTimestamp="2025-11-24 08:08:00 +0000 UTC" firstStartedPulling="2025-11-24 08:08:00.571670733 +0000 UTC m=+642.570619982" lastFinishedPulling="2025-11-24 08:08:03.013426671 +0000 UTC m=+645.012375920" observedRunningTime="2025-11-24 08:08:04.304200389 +0000 UTC m=+646.303149658" watchObservedRunningTime="2025-11-24 08:08:04.311823691 +0000 UTC m=+646.310772950" Nov 24 08:08:04 crc kubenswrapper[4691]: I1124 08:08:04.331069 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-nrlp6" podStartSLOduration=1.750177954 podStartE2EDuration="4.33104348s" podCreationTimestamp="2025-11-24 08:08:00 +0000 UTC" firstStartedPulling="2025-11-24 08:08:01.455799281 +0000 UTC m=+643.454748530" lastFinishedPulling="2025-11-24 08:08:04.036664807 +0000 UTC m=+646.035614056" observedRunningTime="2025-11-24 08:08:04.325212681 +0000 UTC m=+646.324162080" watchObservedRunningTime="2025-11-24 08:08:04.33104348 +0000 UTC m=+646.329992729" Nov 24 08:08:06 crc kubenswrapper[4691]: I1124 08:08:06.301875 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj" event={"ID":"6fc26c17-4027-42aa-821e-b3e5c1f92226","Type":"ContainerStarted","Data":"920b7a145ccfd60e5a0bedfb494b1092693f7b2873cfef24258b9e3a976df97d"} Nov 24 08:08:06 crc kubenswrapper[4691]: I1124 08:08:06.330591 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fjpwj" podStartSLOduration=1.399992969 podStartE2EDuration="6.330566705s" podCreationTimestamp="2025-11-24 08:08:00 +0000 UTC" firstStartedPulling="2025-11-24 08:08:00.828536515 +0000 UTC m=+642.827485764" lastFinishedPulling="2025-11-24 08:08:05.759110231 +0000 UTC m=+647.758059500" observedRunningTime="2025-11-24 08:08:06.326308881 +0000 UTC m=+648.325258170" watchObservedRunningTime="2025-11-24 08:08:06.330566705 +0000 UTC m=+648.329515964" Nov 24 08:08:10 crc kubenswrapper[4691]: I1124 08:08:10.546714 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-sk5rc" Nov 24 08:08:10 crc kubenswrapper[4691]: I1124 08:08:10.967697 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:10 crc kubenswrapper[4691]: I1124 08:08:10.967776 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:10 crc kubenswrapper[4691]: I1124 08:08:10.974862 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:11 crc kubenswrapper[4691]: I1124 08:08:11.340981 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7cbf967b4c-4mwqx" Nov 24 08:08:11 crc kubenswrapper[4691]: I1124 08:08:11.399488 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-h9wgf"] Nov 24 08:08:20 crc kubenswrapper[4691]: I1124 08:08:20.511258 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-xq22b" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.045494 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd"] Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.048287 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.050649 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.054841 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd"] Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.096780 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nd67\" (UniqueName: \"kubernetes.io/projected/6a028a1d-8792-4943-bbca-370668d5c1b2-kube-api-access-4nd67\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.096865 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.096906 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.198415 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nd67\" (UniqueName: \"kubernetes.io/projected/6a028a1d-8792-4943-bbca-370668d5c1b2-kube-api-access-4nd67\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.198522 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.198556 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.199258 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.199474 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.220608 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nd67\" (UniqueName: \"kubernetes.io/projected/6a028a1d-8792-4943-bbca-370668d5c1b2-kube-api-access-4nd67\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.368299 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:34 crc kubenswrapper[4691]: I1124 08:08:34.606391 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd"] Nov 24 08:08:35 crc kubenswrapper[4691]: I1124 08:08:35.512822 4691 generic.go:334] "Generic (PLEG): container finished" podID="6a028a1d-8792-4943-bbca-370668d5c1b2" containerID="09b4961419af9198dbe895e952c34a31d136474aeefa2364f10fa94752e775aa" exitCode=0 Nov 24 08:08:35 crc kubenswrapper[4691]: I1124 08:08:35.512871 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" event={"ID":"6a028a1d-8792-4943-bbca-370668d5c1b2","Type":"ContainerDied","Data":"09b4961419af9198dbe895e952c34a31d136474aeefa2364f10fa94752e775aa"} Nov 24 08:08:35 crc kubenswrapper[4691]: I1124 08:08:35.512898 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" event={"ID":"6a028a1d-8792-4943-bbca-370668d5c1b2","Type":"ContainerStarted","Data":"bad275e103fea96a4f2f68bc06f0f4ded5dbfc91ae1fc3a6ef90af760aa4c2c3"} Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.456294 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-h9wgf" podUID="d611a7c5-68d8-4ea5-88b7-d3fad9baef65" containerName="console" containerID="cri-o://caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c" gracePeriod=15 Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.912976 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-h9wgf_d611a7c5-68d8-4ea5-88b7-d3fad9baef65/console/0.log" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.913648 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.938142 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-oauth-config\") pod \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.938205 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-oauth-serving-cert\") pod \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.938278 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-config\") pod \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.938312 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-trusted-ca-bundle\") pod \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.938332 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-service-ca\") pod \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.939180 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "d611a7c5-68d8-4ea5-88b7-d3fad9baef65" (UID: "d611a7c5-68d8-4ea5-88b7-d3fad9baef65"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.939227 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "d611a7c5-68d8-4ea5-88b7-d3fad9baef65" (UID: "d611a7c5-68d8-4ea5-88b7-d3fad9baef65"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.939279 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-service-ca" (OuterVolumeSpecName: "service-ca") pod "d611a7c5-68d8-4ea5-88b7-d3fad9baef65" (UID: "d611a7c5-68d8-4ea5-88b7-d3fad9baef65"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.939318 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-config" (OuterVolumeSpecName: "console-config") pod "d611a7c5-68d8-4ea5-88b7-d3fad9baef65" (UID: "d611a7c5-68d8-4ea5-88b7-d3fad9baef65"). 
InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.939792 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xjtp\" (UniqueName: \"kubernetes.io/projected/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-kube-api-access-6xjtp\") pod \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.940282 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-serving-cert\") pod \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\" (UID: \"d611a7c5-68d8-4ea5-88b7-d3fad9baef65\") " Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.943872 4691 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.943970 4691 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.943986 4691 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-service-ca\") on node \"crc\" DevicePath \"\"" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.944001 4691 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.949446 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-kube-api-access-6xjtp" (OuterVolumeSpecName: "kube-api-access-6xjtp") pod "d611a7c5-68d8-4ea5-88b7-d3fad9baef65" (UID: "d611a7c5-68d8-4ea5-88b7-d3fad9baef65"). InnerVolumeSpecName "kube-api-access-6xjtp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.950735 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "d611a7c5-68d8-4ea5-88b7-d3fad9baef65" (UID: "d611a7c5-68d8-4ea5-88b7-d3fad9baef65"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:08:36 crc kubenswrapper[4691]: I1124 08:08:36.958848 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "d611a7c5-68d8-4ea5-88b7-d3fad9baef65" (UID: "d611a7c5-68d8-4ea5-88b7-d3fad9baef65"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.046020 4691 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.046068 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xjtp\" (UniqueName: \"kubernetes.io/projected/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-kube-api-access-6xjtp\") on node \"crc\" DevicePath \"\"" Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.046080 4691 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d611a7c5-68d8-4ea5-88b7-d3fad9baef65-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.530849 4691 generic.go:334] "Generic (PLEG): container finished" podID="6a028a1d-8792-4943-bbca-370668d5c1b2" containerID="d5e851b10481257bddef06107276e14ceb247602e0b9ee6abd4372c95c25d280" exitCode=0 Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.530925 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" event={"ID":"6a028a1d-8792-4943-bbca-370668d5c1b2","Type":"ContainerDied","Data":"d5e851b10481257bddef06107276e14ceb247602e0b9ee6abd4372c95c25d280"} Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.533216 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-h9wgf_d611a7c5-68d8-4ea5-88b7-d3fad9baef65/console/0.log" Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.533302 4691 generic.go:334] "Generic (PLEG): container finished" podID="d611a7c5-68d8-4ea5-88b7-d3fad9baef65" containerID="caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c" exitCode=2 Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.533351 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h9wgf" event={"ID":"d611a7c5-68d8-4ea5-88b7-d3fad9baef65","Type":"ContainerDied","Data":"caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c"} Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.533382 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-h9wgf" Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.533421 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h9wgf" event={"ID":"d611a7c5-68d8-4ea5-88b7-d3fad9baef65","Type":"ContainerDied","Data":"cc6128be54dc3fe4ece5ffe50125204ac63d63c79095b536dab42d4add999264"} Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.533530 4691 scope.go:117] "RemoveContainer" containerID="caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c" Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.563999 4691 scope.go:117] "RemoveContainer" containerID="caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c" Nov 24 08:08:37 crc kubenswrapper[4691]: E1124 08:08:37.564806 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c\": container with ID starting with caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c not found: ID does not exist" containerID="caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c" Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.564942 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c"} err="failed to get container status \"caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c\": rpc error: code = NotFound desc = could not find container \"caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c\": container with ID starting with caa616f09e9f12db47ec83f0816c6db9b7c79a5b8e34cf63015cd230a8fe8e4c not found: ID does not exist" Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.634362 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-h9wgf"] Nov 24 08:08:37 crc kubenswrapper[4691]: I1124 08:08:37.638100 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-h9wgf"] Nov 24 08:08:38 crc kubenswrapper[4691]: I1124 08:08:38.545535 4691 generic.go:334] "Generic (PLEG): container finished" podID="6a028a1d-8792-4943-bbca-370668d5c1b2" containerID="b0874ca1202d33f3782937f9720f39dc7ec93c9cdc667bcd85c64482aeadfd7e" exitCode=0 Nov 24 08:08:38 crc kubenswrapper[4691]: I1124 08:08:38.545677 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" event={"ID":"6a028a1d-8792-4943-bbca-370668d5c1b2","Type":"ContainerDied","Data":"b0874ca1202d33f3782937f9720f39dc7ec93c9cdc667bcd85c64482aeadfd7e"} Nov 24 08:08:38 crc kubenswrapper[4691]: I1124 08:08:38.772577 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d611a7c5-68d8-4ea5-88b7-d3fad9baef65" path="/var/lib/kubelet/pods/d611a7c5-68d8-4ea5-88b7-d3fad9baef65/volumes" Nov 24 08:08:39 crc kubenswrapper[4691]: I1124 08:08:39.866685 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:39 crc kubenswrapper[4691]: I1124 08:08:39.991597 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-bundle\") pod \"6a028a1d-8792-4943-bbca-370668d5c1b2\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " Nov 24 08:08:39 crc kubenswrapper[4691]: I1124 08:08:39.991830 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-util\") pod \"6a028a1d-8792-4943-bbca-370668d5c1b2\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " Nov 24 08:08:39 crc kubenswrapper[4691]: I1124 08:08:39.991924 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4nd67\" (UniqueName: \"kubernetes.io/projected/6a028a1d-8792-4943-bbca-370668d5c1b2-kube-api-access-4nd67\") pod \"6a028a1d-8792-4943-bbca-370668d5c1b2\" (UID: \"6a028a1d-8792-4943-bbca-370668d5c1b2\") " Nov 24 08:08:39 crc kubenswrapper[4691]: I1124 08:08:39.993935 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-bundle" (OuterVolumeSpecName: "bundle") pod "6a028a1d-8792-4943-bbca-370668d5c1b2" (UID: "6a028a1d-8792-4943-bbca-370668d5c1b2"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:08:39 crc kubenswrapper[4691]: I1124 08:08:39.999850 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a028a1d-8792-4943-bbca-370668d5c1b2-kube-api-access-4nd67" (OuterVolumeSpecName: "kube-api-access-4nd67") pod "6a028a1d-8792-4943-bbca-370668d5c1b2" (UID: "6a028a1d-8792-4943-bbca-370668d5c1b2"). InnerVolumeSpecName "kube-api-access-4nd67". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:08:40 crc kubenswrapper[4691]: I1124 08:08:40.094383 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4nd67\" (UniqueName: \"kubernetes.io/projected/6a028a1d-8792-4943-bbca-370668d5c1b2-kube-api-access-4nd67\") on node \"crc\" DevicePath \"\"" Nov 24 08:08:40 crc kubenswrapper[4691]: I1124 08:08:40.094472 4691 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:08:40 crc kubenswrapper[4691]: I1124 08:08:40.189883 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-util" (OuterVolumeSpecName: "util") pod "6a028a1d-8792-4943-bbca-370668d5c1b2" (UID: "6a028a1d-8792-4943-bbca-370668d5c1b2"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:08:40 crc kubenswrapper[4691]: I1124 08:08:40.195748 4691 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a028a1d-8792-4943-bbca-370668d5c1b2-util\") on node \"crc\" DevicePath \"\"" Nov 24 08:08:40 crc kubenswrapper[4691]: I1124 08:08:40.566334 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" event={"ID":"6a028a1d-8792-4943-bbca-370668d5c1b2","Type":"ContainerDied","Data":"bad275e103fea96a4f2f68bc06f0f4ded5dbfc91ae1fc3a6ef90af760aa4c2c3"} Nov 24 08:08:40 crc kubenswrapper[4691]: I1124 08:08:40.566379 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bad275e103fea96a4f2f68bc06f0f4ded5dbfc91ae1fc3a6ef90af760aa4c2c3" Nov 24 08:08:40 crc kubenswrapper[4691]: I1124 08:08:40.566463 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.095370 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4"] Nov 24 08:08:49 crc kubenswrapper[4691]: E1124 08:08:49.096269 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a028a1d-8792-4943-bbca-370668d5c1b2" containerName="extract" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.096284 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a028a1d-8792-4943-bbca-370668d5c1b2" containerName="extract" Nov 24 08:08:49 crc kubenswrapper[4691]: E1124 08:08:49.096299 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a028a1d-8792-4943-bbca-370668d5c1b2" containerName="pull" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.096305 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a028a1d-8792-4943-bbca-370668d5c1b2" containerName="pull" Nov 24 08:08:49 crc kubenswrapper[4691]: E1124 08:08:49.096316 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a028a1d-8792-4943-bbca-370668d5c1b2" containerName="util" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.096323 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a028a1d-8792-4943-bbca-370668d5c1b2" containerName="util" Nov 24 08:08:49 crc kubenswrapper[4691]: E1124 08:08:49.096340 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d611a7c5-68d8-4ea5-88b7-d3fad9baef65" containerName="console" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.096345 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d611a7c5-68d8-4ea5-88b7-d3fad9baef65" containerName="console" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.096438 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d611a7c5-68d8-4ea5-88b7-d3fad9baef65" containerName="console" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.096471 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a028a1d-8792-4943-bbca-370668d5c1b2" containerName="extract" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.096895 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.099907 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-hm27f" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.100332 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.100392 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.101474 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.102870 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.121400 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4"] Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.264005 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5c353a67-2f3f-4608-a19e-406c31bae85a-apiservice-cert\") pod \"metallb-operator-controller-manager-86b4f5566f-xknt4\" (UID: \"5c353a67-2f3f-4608-a19e-406c31bae85a\") " pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.264116 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwfkt\" (UniqueName: \"kubernetes.io/projected/5c353a67-2f3f-4608-a19e-406c31bae85a-kube-api-access-nwfkt\") pod \"metallb-operator-controller-manager-86b4f5566f-xknt4\" (UID: \"5c353a67-2f3f-4608-a19e-406c31bae85a\") " pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.264171 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5c353a67-2f3f-4608-a19e-406c31bae85a-webhook-cert\") pod \"metallb-operator-controller-manager-86b4f5566f-xknt4\" (UID: \"5c353a67-2f3f-4608-a19e-406c31bae85a\") " pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.365325 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5c353a67-2f3f-4608-a19e-406c31bae85a-webhook-cert\") pod \"metallb-operator-controller-manager-86b4f5566f-xknt4\" (UID: \"5c353a67-2f3f-4608-a19e-406c31bae85a\") " pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.366593 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5c353a67-2f3f-4608-a19e-406c31bae85a-apiservice-cert\") pod \"metallb-operator-controller-manager-86b4f5566f-xknt4\" (UID: \"5c353a67-2f3f-4608-a19e-406c31bae85a\") " pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.366991 4691 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwfkt\" (UniqueName: \"kubernetes.io/projected/5c353a67-2f3f-4608-a19e-406c31bae85a-kube-api-access-nwfkt\") pod \"metallb-operator-controller-manager-86b4f5566f-xknt4\" (UID: \"5c353a67-2f3f-4608-a19e-406c31bae85a\") " pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.371782 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5c353a67-2f3f-4608-a19e-406c31bae85a-webhook-cert\") pod \"metallb-operator-controller-manager-86b4f5566f-xknt4\" (UID: \"5c353a67-2f3f-4608-a19e-406c31bae85a\") " pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.372092 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5c353a67-2f3f-4608-a19e-406c31bae85a-apiservice-cert\") pod \"metallb-operator-controller-manager-86b4f5566f-xknt4\" (UID: \"5c353a67-2f3f-4608-a19e-406c31bae85a\") " pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.386584 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwfkt\" (UniqueName: \"kubernetes.io/projected/5c353a67-2f3f-4608-a19e-406c31bae85a-kube-api-access-nwfkt\") pod \"metallb-operator-controller-manager-86b4f5566f-xknt4\" (UID: \"5c353a67-2f3f-4608-a19e-406c31bae85a\") " pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.414803 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.479219 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j"] Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.480336 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.483506 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.484012 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-lhbt6" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.484249 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.508132 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j"] Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.570214 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rv9vw\" (UniqueName: \"kubernetes.io/projected/42e2daa4-034f-4fe6-852e-479d1a2570bb-kube-api-access-rv9vw\") pod \"metallb-operator-webhook-server-577d7cd9f7-t5x6j\" (UID: \"42e2daa4-034f-4fe6-852e-479d1a2570bb\") " pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.570329 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/42e2daa4-034f-4fe6-852e-479d1a2570bb-webhook-cert\") pod \"metallb-operator-webhook-server-577d7cd9f7-t5x6j\" (UID: \"42e2daa4-034f-4fe6-852e-479d1a2570bb\") " pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.570466 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/42e2daa4-034f-4fe6-852e-479d1a2570bb-apiservice-cert\") pod \"metallb-operator-webhook-server-577d7cd9f7-t5x6j\" (UID: \"42e2daa4-034f-4fe6-852e-479d1a2570bb\") " pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.672929 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rv9vw\" (UniqueName: \"kubernetes.io/projected/42e2daa4-034f-4fe6-852e-479d1a2570bb-kube-api-access-rv9vw\") pod \"metallb-operator-webhook-server-577d7cd9f7-t5x6j\" (UID: \"42e2daa4-034f-4fe6-852e-479d1a2570bb\") " pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.673027 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/42e2daa4-034f-4fe6-852e-479d1a2570bb-webhook-cert\") pod \"metallb-operator-webhook-server-577d7cd9f7-t5x6j\" (UID: \"42e2daa4-034f-4fe6-852e-479d1a2570bb\") " pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.673064 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/42e2daa4-034f-4fe6-852e-479d1a2570bb-apiservice-cert\") pod \"metallb-operator-webhook-server-577d7cd9f7-t5x6j\" (UID: \"42e2daa4-034f-4fe6-852e-479d1a2570bb\") " pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 
08:08:49.679050 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/42e2daa4-034f-4fe6-852e-479d1a2570bb-webhook-cert\") pod \"metallb-operator-webhook-server-577d7cd9f7-t5x6j\" (UID: \"42e2daa4-034f-4fe6-852e-479d1a2570bb\") " pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.681634 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/42e2daa4-034f-4fe6-852e-479d1a2570bb-apiservice-cert\") pod \"metallb-operator-webhook-server-577d7cd9f7-t5x6j\" (UID: \"42e2daa4-034f-4fe6-852e-479d1a2570bb\") " pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.698002 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rv9vw\" (UniqueName: \"kubernetes.io/projected/42e2daa4-034f-4fe6-852e-479d1a2570bb-kube-api-access-rv9vw\") pod \"metallb-operator-webhook-server-577d7cd9f7-t5x6j\" (UID: \"42e2daa4-034f-4fe6-852e-479d1a2570bb\") " pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.745191 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4"] Nov 24 08:08:49 crc kubenswrapper[4691]: I1124 08:08:49.799721 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:50 crc kubenswrapper[4691]: I1124 08:08:50.302746 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j"] Nov 24 08:08:50 crc kubenswrapper[4691]: W1124 08:08:50.310952 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42e2daa4_034f_4fe6_852e_479d1a2570bb.slice/crio-8ef2f56e78c619bf31e443f9b27e3868a230967c3ed4643b5e79cfaeb4f06f1f WatchSource:0}: Error finding container 8ef2f56e78c619bf31e443f9b27e3868a230967c3ed4643b5e79cfaeb4f06f1f: Status 404 returned error can't find the container with id 8ef2f56e78c619bf31e443f9b27e3868a230967c3ed4643b5e79cfaeb4f06f1f Nov 24 08:08:50 crc kubenswrapper[4691]: I1124 08:08:50.647032 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" event={"ID":"5c353a67-2f3f-4608-a19e-406c31bae85a","Type":"ContainerStarted","Data":"07cc4e96a972101e0ad78d1de89e74f87be618420ec8000e8243f15ea2e1d8c0"} Nov 24 08:08:50 crc kubenswrapper[4691]: I1124 08:08:50.647946 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" event={"ID":"42e2daa4-034f-4fe6-852e-479d1a2570bb","Type":"ContainerStarted","Data":"8ef2f56e78c619bf31e443f9b27e3868a230967c3ed4643b5e79cfaeb4f06f1f"} Nov 24 08:08:53 crc kubenswrapper[4691]: I1124 08:08:53.681855 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" event={"ID":"5c353a67-2f3f-4608-a19e-406c31bae85a","Type":"ContainerStarted","Data":"150148d19149f999ccc6613b0f8bf9704d055b297a6e6af1be391b24d6740b70"} Nov 24 08:08:53 crc kubenswrapper[4691]: I1124 08:08:53.682333 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:08:53 crc kubenswrapper[4691]: I1124 08:08:53.703290 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" podStartSLOduration=1.917571083 podStartE2EDuration="4.703271804s" podCreationTimestamp="2025-11-24 08:08:49 +0000 UTC" firstStartedPulling="2025-11-24 08:08:49.779486265 +0000 UTC m=+691.778435514" lastFinishedPulling="2025-11-24 08:08:52.565186986 +0000 UTC m=+694.564136235" observedRunningTime="2025-11-24 08:08:53.700960557 +0000 UTC m=+695.699909806" watchObservedRunningTime="2025-11-24 08:08:53.703271804 +0000 UTC m=+695.702221053" Nov 24 08:08:55 crc kubenswrapper[4691]: I1124 08:08:55.712963 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" event={"ID":"42e2daa4-034f-4fe6-852e-479d1a2570bb","Type":"ContainerStarted","Data":"05b56d7f8ca2e0d21b1eabacd65d7ae00643f09f7e515a99859f44c41a09dcf1"} Nov 24 08:08:55 crc kubenswrapper[4691]: I1124 08:08:55.713972 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:08:55 crc kubenswrapper[4691]: I1124 08:08:55.733749 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" podStartSLOduration=1.8254424710000001 podStartE2EDuration="6.733719682s" podCreationTimestamp="2025-11-24 08:08:49 +0000 UTC" firstStartedPulling="2025-11-24 08:08:50.313880727 +0000 UTC m=+692.312829976" lastFinishedPulling="2025-11-24 08:08:55.222157938 +0000 UTC m=+697.221107187" observedRunningTime="2025-11-24 08:08:55.733685261 +0000 UTC m=+697.732634530" watchObservedRunningTime="2025-11-24 08:08:55.733719682 +0000 UTC m=+697.732668961" Nov 24 08:09:09 crc kubenswrapper[4691]: I1124 08:09:09.808487 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-577d7cd9f7-t5x6j" Nov 24 08:09:21 crc kubenswrapper[4691]: I1124 08:09:21.089812 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:09:21 crc kubenswrapper[4691]: I1124 08:09:21.090693 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:09:29 crc kubenswrapper[4691]: I1124 08:09:29.418309 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-86b4f5566f-xknt4" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.060164 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-28hsg"] Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.085369 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.087603 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-bbnct"] Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.089619 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.102235 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-n6zvp" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.102467 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.107429 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.108776 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.110843 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-bbnct"] Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.164460 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-b884p"] Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.165430 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.168644 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.168953 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.171173 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-89pnp" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.171474 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.178153 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-frr-sockets\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.178238 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e740c0fa-a972-42fe-8e95-aaed01b46916-cert\") pod \"frr-k8s-webhook-server-6998585d5-bbnct\" (UID: \"e740c0fa-a972-42fe-8e95-aaed01b46916\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.178277 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk5k2\" (UniqueName: \"kubernetes.io/projected/e740c0fa-a972-42fe-8e95-aaed01b46916-kube-api-access-rk5k2\") pod \"frr-k8s-webhook-server-6998585d5-bbnct\" (UID: \"e740c0fa-a972-42fe-8e95-aaed01b46916\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" Nov 24 
08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.178320 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-metrics\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.178364 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8zfv\" (UniqueName: \"kubernetes.io/projected/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-kube-api-access-g8zfv\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.178489 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-frr-conf\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.178556 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-metrics-certs\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.178599 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-frr-startup\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.178692 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-reloader\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.195276 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-wgxjz"] Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.196710 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.201173 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.209908 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-wgxjz"] Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.280470 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-metrics-certs\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.280545 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8a2bb6bf-c15d-40aa-9af4-b4c55f67acff-cert\") pod \"controller-6c7b4b5f48-wgxjz\" (UID: \"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff\") " pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.280589 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-metrics\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.280714 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8a2bb6bf-c15d-40aa-9af4-b4c55f67acff-metrics-certs\") pod \"controller-6c7b4b5f48-wgxjz\" (UID: \"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff\") " pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.280793 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8zfv\" (UniqueName: \"kubernetes.io/projected/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-kube-api-access-g8zfv\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.280825 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-frr-conf\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.280856 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-memberlist\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.280895 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-metrics-certs\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.280936 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"frr-startup\" (UniqueName: \"kubernetes.io/configmap/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-frr-startup\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.281018 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-metrics\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.281040 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr7tn\" (UniqueName: \"kubernetes.io/projected/8a2bb6bf-c15d-40aa-9af4-b4c55f67acff-kube-api-access-nr7tn\") pod \"controller-6c7b4b5f48-wgxjz\" (UID: \"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff\") " pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.281082 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-reloader\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.281117 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-frr-sockets\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.281144 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e76d55fd-c894-4236-8921-8b60a88125f7-metallb-excludel2\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.281187 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jk4ks\" (UniqueName: \"kubernetes.io/projected/e76d55fd-c894-4236-8921-8b60a88125f7-kube-api-access-jk4ks\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.281233 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-frr-conf\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.281244 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e740c0fa-a972-42fe-8e95-aaed01b46916-cert\") pod \"frr-k8s-webhook-server-6998585d5-bbnct\" (UID: \"e740c0fa-a972-42fe-8e95-aaed01b46916\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.281312 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rk5k2\" (UniqueName: \"kubernetes.io/projected/e740c0fa-a972-42fe-8e95-aaed01b46916-kube-api-access-rk5k2\") pod \"frr-k8s-webhook-server-6998585d5-bbnct\" (UID: 
\"e740c0fa-a972-42fe-8e95-aaed01b46916\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.281936 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-reloader\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: E1124 08:09:30.281978 4691 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Nov 24 08:09:30 crc kubenswrapper[4691]: E1124 08:09:30.282012 4691 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Nov 24 08:09:30 crc kubenswrapper[4691]: E1124 08:09:30.282088 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-metrics-certs podName:428c03ad-0ec5-4e31-84e1-1a30cba68bc7 nodeName:}" failed. No retries permitted until 2025-11-24 08:09:30.782055517 +0000 UTC m=+732.781004766 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-metrics-certs") pod "frr-k8s-28hsg" (UID: "428c03ad-0ec5-4e31-84e1-1a30cba68bc7") : secret "frr-k8s-certs-secret" not found Nov 24 08:09:30 crc kubenswrapper[4691]: E1124 08:09:30.282110 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e740c0fa-a972-42fe-8e95-aaed01b46916-cert podName:e740c0fa-a972-42fe-8e95-aaed01b46916 nodeName:}" failed. No retries permitted until 2025-11-24 08:09:30.782101328 +0000 UTC m=+732.781050577 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e740c0fa-a972-42fe-8e95-aaed01b46916-cert") pod "frr-k8s-webhook-server-6998585d5-bbnct" (UID: "e740c0fa-a972-42fe-8e95-aaed01b46916") : secret "frr-k8s-webhook-server-cert" not found Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.282169 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-frr-sockets\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.283257 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-frr-startup\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.302949 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8zfv\" (UniqueName: \"kubernetes.io/projected/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-kube-api-access-g8zfv\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.316312 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk5k2\" (UniqueName: \"kubernetes.io/projected/e740c0fa-a972-42fe-8e95-aaed01b46916-kube-api-access-rk5k2\") pod \"frr-k8s-webhook-server-6998585d5-bbnct\" (UID: \"e740c0fa-a972-42fe-8e95-aaed01b46916\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.382753 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-metrics-certs\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.383150 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8a2bb6bf-c15d-40aa-9af4-b4c55f67acff-cert\") pod \"controller-6c7b4b5f48-wgxjz\" (UID: \"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff\") " pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.383273 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8a2bb6bf-c15d-40aa-9af4-b4c55f67acff-metrics-certs\") pod \"controller-6c7b4b5f48-wgxjz\" (UID: \"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff\") " pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.383375 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-memberlist\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.383540 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr7tn\" (UniqueName: \"kubernetes.io/projected/8a2bb6bf-c15d-40aa-9af4-b4c55f67acff-kube-api-access-nr7tn\") pod \"controller-6c7b4b5f48-wgxjz\" (UID: 
\"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff\") " pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.383634 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e76d55fd-c894-4236-8921-8b60a88125f7-metallb-excludel2\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.383724 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jk4ks\" (UniqueName: \"kubernetes.io/projected/e76d55fd-c894-4236-8921-8b60a88125f7-kube-api-access-jk4ks\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: E1124 08:09:30.383655 4691 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 24 08:09:30 crc kubenswrapper[4691]: E1124 08:09:30.384018 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-memberlist podName:e76d55fd-c894-4236-8921-8b60a88125f7 nodeName:}" failed. No retries permitted until 2025-11-24 08:09:30.88397496 +0000 UTC m=+732.882924389 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-memberlist") pod "speaker-b884p" (UID: "e76d55fd-c894-4236-8921-8b60a88125f7") : secret "metallb-memberlist" not found Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.384655 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e76d55fd-c894-4236-8921-8b60a88125f7-metallb-excludel2\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.386249 4691 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.386824 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8a2bb6bf-c15d-40aa-9af4-b4c55f67acff-metrics-certs\") pod \"controller-6c7b4b5f48-wgxjz\" (UID: \"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff\") " pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.386942 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-metrics-certs\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.397550 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8a2bb6bf-c15d-40aa-9af4-b4c55f67acff-cert\") pod \"controller-6c7b4b5f48-wgxjz\" (UID: \"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff\") " pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.401828 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jk4ks\" (UniqueName: \"kubernetes.io/projected/e76d55fd-c894-4236-8921-8b60a88125f7-kube-api-access-jk4ks\") pod \"speaker-b884p\" (UID: 
\"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.403099 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr7tn\" (UniqueName: \"kubernetes.io/projected/8a2bb6bf-c15d-40aa-9af4-b4c55f67acff-kube-api-access-nr7tn\") pod \"controller-6c7b4b5f48-wgxjz\" (UID: \"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff\") " pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.511767 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.791070 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-metrics-certs\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.791481 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e740c0fa-a972-42fe-8e95-aaed01b46916-cert\") pod \"frr-k8s-webhook-server-6998585d5-bbnct\" (UID: \"e740c0fa-a972-42fe-8e95-aaed01b46916\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.795713 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e740c0fa-a972-42fe-8e95-aaed01b46916-cert\") pod \"frr-k8s-webhook-server-6998585d5-bbnct\" (UID: \"e740c0fa-a972-42fe-8e95-aaed01b46916\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.796218 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428c03ad-0ec5-4e31-84e1-1a30cba68bc7-metrics-certs\") pod \"frr-k8s-28hsg\" (UID: \"428c03ad-0ec5-4e31-84e1-1a30cba68bc7\") " pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.893133 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-memberlist\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:30 crc kubenswrapper[4691]: E1124 08:09:30.893358 4691 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 24 08:09:30 crc kubenswrapper[4691]: E1124 08:09:30.893466 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-memberlist podName:e76d55fd-c894-4236-8921-8b60a88125f7 nodeName:}" failed. No retries permitted until 2025-11-24 08:09:31.893431212 +0000 UTC m=+733.892380461 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-memberlist") pod "speaker-b884p" (UID: "e76d55fd-c894-4236-8921-8b60a88125f7") : secret "metallb-memberlist" not found Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.920985 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-wgxjz"] Nov 24 08:09:30 crc kubenswrapper[4691]: I1124 08:09:30.964917 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-wgxjz" event={"ID":"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff","Type":"ContainerStarted","Data":"1850d7552b14ff2c6ca1af688f295e8a0bf2f1ac164bc7fcf7879435b0bdbaa2"} Nov 24 08:09:31 crc kubenswrapper[4691]: I1124 08:09:31.030694 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:31 crc kubenswrapper[4691]: I1124 08:09:31.040559 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" Nov 24 08:09:31 crc kubenswrapper[4691]: I1124 08:09:31.287906 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-bbnct"] Nov 24 08:09:31 crc kubenswrapper[4691]: W1124 08:09:31.298485 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode740c0fa_a972_42fe_8e95_aaed01b46916.slice/crio-c4b6c6fccae45bbc2f1079d1c4c2b7b171f96d1041f4935a83f65cd7107f6592 WatchSource:0}: Error finding container c4b6c6fccae45bbc2f1079d1c4c2b7b171f96d1041f4935a83f65cd7107f6592: Status 404 returned error can't find the container with id c4b6c6fccae45bbc2f1079d1c4c2b7b171f96d1041f4935a83f65cd7107f6592 Nov 24 08:09:31 crc kubenswrapper[4691]: I1124 08:09:31.907975 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-memberlist\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:31 crc kubenswrapper[4691]: I1124 08:09:31.915374 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e76d55fd-c894-4236-8921-8b60a88125f7-memberlist\") pod \"speaker-b884p\" (UID: \"e76d55fd-c894-4236-8921-8b60a88125f7\") " pod="metallb-system/speaker-b884p" Nov 24 08:09:31 crc kubenswrapper[4691]: I1124 08:09:31.971685 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-28hsg" event={"ID":"428c03ad-0ec5-4e31-84e1-1a30cba68bc7","Type":"ContainerStarted","Data":"7a75155b89890cc50695fa619fbf0e7d9eeaf3cab5a55601f7ed7f41f888b6cf"} Nov 24 08:09:31 crc kubenswrapper[4691]: I1124 08:09:31.972723 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" event={"ID":"e740c0fa-a972-42fe-8e95-aaed01b46916","Type":"ContainerStarted","Data":"c4b6c6fccae45bbc2f1079d1c4c2b7b171f96d1041f4935a83f65cd7107f6592"} Nov 24 08:09:31 crc kubenswrapper[4691]: I1124 08:09:31.974736 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-wgxjz" event={"ID":"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff","Type":"ContainerStarted","Data":"687ffcb1839601f625beeb9ec6136f21733e07f00ead52e54d59bd1a921019ef"} Nov 24 08:09:31 crc kubenswrapper[4691]: I1124 08:09:31.974760 4691 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-wgxjz" event={"ID":"8a2bb6bf-c15d-40aa-9af4-b4c55f67acff","Type":"ContainerStarted","Data":"87ffb5ea75f819db0c59a8677c2c2f5b5ea5337db036e62d7fffc62a5c383330"} Nov 24 08:09:31 crc kubenswrapper[4691]: I1124 08:09:31.974933 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:31 crc kubenswrapper[4691]: I1124 08:09:31.981559 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-b884p" Nov 24 08:09:32 crc kubenswrapper[4691]: W1124 08:09:32.021219 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode76d55fd_c894_4236_8921_8b60a88125f7.slice/crio-4cffac5d3b52b5c9d68e63b199a50b2dbd1955df7d6499f935666c289cbdb349 WatchSource:0}: Error finding container 4cffac5d3b52b5c9d68e63b199a50b2dbd1955df7d6499f935666c289cbdb349: Status 404 returned error can't find the container with id 4cffac5d3b52b5c9d68e63b199a50b2dbd1955df7d6499f935666c289cbdb349 Nov 24 08:09:32 crc kubenswrapper[4691]: I1124 08:09:32.988607 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-b884p" event={"ID":"e76d55fd-c894-4236-8921-8b60a88125f7","Type":"ContainerStarted","Data":"9f88d3d9cd9d12264c210c51109dd6e417373b744240e6837f53957f44873937"} Nov 24 08:09:32 crc kubenswrapper[4691]: I1124 08:09:32.988680 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-b884p" event={"ID":"e76d55fd-c894-4236-8921-8b60a88125f7","Type":"ContainerStarted","Data":"4a5ab85cffa1ba5b648b81e51630275dc55b4dc7ea7960860b030b2dc069e0e3"} Nov 24 08:09:32 crc kubenswrapper[4691]: I1124 08:09:32.988695 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-b884p" event={"ID":"e76d55fd-c894-4236-8921-8b60a88125f7","Type":"ContainerStarted","Data":"4cffac5d3b52b5c9d68e63b199a50b2dbd1955df7d6499f935666c289cbdb349"} Nov 24 08:09:32 crc kubenswrapper[4691]: I1124 08:09:32.988958 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-b884p" Nov 24 08:09:33 crc kubenswrapper[4691]: I1124 08:09:33.018543 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-b884p" podStartSLOduration=3.018522552 podStartE2EDuration="3.018522552s" podCreationTimestamp="2025-11-24 08:09:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:09:33.016015709 +0000 UTC m=+735.014964968" watchObservedRunningTime="2025-11-24 08:09:33.018522552 +0000 UTC m=+735.017471801" Nov 24 08:09:33 crc kubenswrapper[4691]: I1124 08:09:33.019960 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-wgxjz" podStartSLOduration=3.019955303 podStartE2EDuration="3.019955303s" podCreationTimestamp="2025-11-24 08:09:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:09:32.03875557 +0000 UTC m=+734.037704839" watchObservedRunningTime="2025-11-24 08:09:33.019955303 +0000 UTC m=+735.018904552" Nov 24 08:09:39 crc kubenswrapper[4691]: I1124 08:09:39.059614 4691 generic.go:334] "Generic (PLEG): container finished" podID="428c03ad-0ec5-4e31-84e1-1a30cba68bc7" 
containerID="67e66576b23d96f2a99597cb811526004dce038f7e36a6709282aa1b156ae5c3" exitCode=0 Nov 24 08:09:39 crc kubenswrapper[4691]: I1124 08:09:39.059760 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-28hsg" event={"ID":"428c03ad-0ec5-4e31-84e1-1a30cba68bc7","Type":"ContainerDied","Data":"67e66576b23d96f2a99597cb811526004dce038f7e36a6709282aa1b156ae5c3"} Nov 24 08:09:39 crc kubenswrapper[4691]: I1124 08:09:39.066500 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" event={"ID":"e740c0fa-a972-42fe-8e95-aaed01b46916","Type":"ContainerStarted","Data":"49b8ce675519cd444ecd2692ae91aa4e56b7937d5f5eec66b6b61825bc05833a"} Nov 24 08:09:39 crc kubenswrapper[4691]: I1124 08:09:39.066702 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" Nov 24 08:09:40 crc kubenswrapper[4691]: I1124 08:09:40.077436 4691 generic.go:334] "Generic (PLEG): container finished" podID="428c03ad-0ec5-4e31-84e1-1a30cba68bc7" containerID="6fa4e71d68e33c96cb610d7135d4caf2efd3d673d5973a24dc5ddb18a2216681" exitCode=0 Nov 24 08:09:40 crc kubenswrapper[4691]: I1124 08:09:40.077539 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-28hsg" event={"ID":"428c03ad-0ec5-4e31-84e1-1a30cba68bc7","Type":"ContainerDied","Data":"6fa4e71d68e33c96cb610d7135d4caf2efd3d673d5973a24dc5ddb18a2216681"} Nov 24 08:09:40 crc kubenswrapper[4691]: I1124 08:09:40.124014 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" podStartSLOduration=3.2817732299999998 podStartE2EDuration="10.123977979s" podCreationTimestamp="2025-11-24 08:09:30 +0000 UTC" firstStartedPulling="2025-11-24 08:09:31.317634574 +0000 UTC m=+733.316583823" lastFinishedPulling="2025-11-24 08:09:38.159839323 +0000 UTC m=+740.158788572" observedRunningTime="2025-11-24 08:09:39.116584737 +0000 UTC m=+741.115534026" watchObservedRunningTime="2025-11-24 08:09:40.123977979 +0000 UTC m=+742.122927268" Nov 24 08:09:41 crc kubenswrapper[4691]: I1124 08:09:41.088798 4691 generic.go:334] "Generic (PLEG): container finished" podID="428c03ad-0ec5-4e31-84e1-1a30cba68bc7" containerID="e771fa1bfeb93d80c07ac0968b9806f29c62fb65043d3873e182c4078f6d30d2" exitCode=0 Nov 24 08:09:41 crc kubenswrapper[4691]: I1124 08:09:41.088918 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-28hsg" event={"ID":"428c03ad-0ec5-4e31-84e1-1a30cba68bc7","Type":"ContainerDied","Data":"e771fa1bfeb93d80c07ac0968b9806f29c62fb65043d3873e182c4078f6d30d2"} Nov 24 08:09:42 crc kubenswrapper[4691]: I1124 08:09:42.100079 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-28hsg" event={"ID":"428c03ad-0ec5-4e31-84e1-1a30cba68bc7","Type":"ContainerStarted","Data":"daadfc883af4c0b018ea8bbab7cc334545dd28ef10a274c2c0932554aaa927f2"} Nov 24 08:09:42 crc kubenswrapper[4691]: I1124 08:09:42.100130 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-28hsg" event={"ID":"428c03ad-0ec5-4e31-84e1-1a30cba68bc7","Type":"ContainerStarted","Data":"85a64dcdf55c3c63deb81a7ff9795b31354230c45b6cfe760a25e849881e0ff7"} Nov 24 08:09:42 crc kubenswrapper[4691]: I1124 08:09:42.100141 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-28hsg" 
event={"ID":"428c03ad-0ec5-4e31-84e1-1a30cba68bc7","Type":"ContainerStarted","Data":"f4124240a5cd006a2d45b03dd82dbf46dce953be988a7d0796a76bb17148f765"} Nov 24 08:09:42 crc kubenswrapper[4691]: I1124 08:09:42.100150 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-28hsg" event={"ID":"428c03ad-0ec5-4e31-84e1-1a30cba68bc7","Type":"ContainerStarted","Data":"3ca1c908cc7e92a248d92fbd1de50fdb5fe450a244aa0a18a6461e821f5406e4"} Nov 24 08:09:42 crc kubenswrapper[4691]: I1124 08:09:42.100161 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-28hsg" event={"ID":"428c03ad-0ec5-4e31-84e1-1a30cba68bc7","Type":"ContainerStarted","Data":"3559e50f90d88fb6e81e25bdba8880d1b4cf54fa9acf1adeceeb38f3dbfb52e8"} Nov 24 08:09:43 crc kubenswrapper[4691]: I1124 08:09:43.112630 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-28hsg" event={"ID":"428c03ad-0ec5-4e31-84e1-1a30cba68bc7","Type":"ContainerStarted","Data":"a7d47ec0bcc1f71dcac775914d808178ce308df84e2bc3a4fec82ba7eeb11ccb"} Nov 24 08:09:43 crc kubenswrapper[4691]: I1124 08:09:43.113147 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:43 crc kubenswrapper[4691]: I1124 08:09:43.142152 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-28hsg" podStartSLOduration=6.137255565 podStartE2EDuration="13.142123257s" podCreationTimestamp="2025-11-24 08:09:30 +0000 UTC" firstStartedPulling="2025-11-24 08:09:31.18599773 +0000 UTC m=+733.184946979" lastFinishedPulling="2025-11-24 08:09:38.190865422 +0000 UTC m=+740.189814671" observedRunningTime="2025-11-24 08:09:43.138936934 +0000 UTC m=+745.137886183" watchObservedRunningTime="2025-11-24 08:09:43.142123257 +0000 UTC m=+745.141072506" Nov 24 08:09:46 crc kubenswrapper[4691]: I1124 08:09:46.031531 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:46 crc kubenswrapper[4691]: I1124 08:09:46.073213 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:50 crc kubenswrapper[4691]: I1124 08:09:50.517859 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-wgxjz" Nov 24 08:09:51 crc kubenswrapper[4691]: I1124 08:09:51.039017 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-28hsg" Nov 24 08:09:51 crc kubenswrapper[4691]: I1124 08:09:51.048217 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-bbnct" Nov 24 08:09:51 crc kubenswrapper[4691]: I1124 08:09:51.089084 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:09:51 crc kubenswrapper[4691]: I1124 08:09:51.089146 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:09:51 crc kubenswrapper[4691]: I1124 
08:09:51.984790 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-b884p"
Nov 24 08:09:54 crc kubenswrapper[4691]: I1124 08:09:54.645808 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-p2w5f"]
Nov 24 08:09:54 crc kubenswrapper[4691]: I1124 08:09:54.646718 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-p2w5f"
Nov 24 08:09:54 crc kubenswrapper[4691]: I1124 08:09:54.651171 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 24 08:09:54 crc kubenswrapper[4691]: I1124 08:09:54.651274 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 24 08:09:54 crc kubenswrapper[4691]: I1124 08:09:54.651178 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-wvpnk"
Nov 24 08:09:54 crc kubenswrapper[4691]: I1124 08:09:54.681036 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-p2w5f"]
Nov 24 08:09:54 crc kubenswrapper[4691]: I1124 08:09:54.791516 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snhfr\" (UniqueName: \"kubernetes.io/projected/1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7-kube-api-access-snhfr\") pod \"openstack-operator-index-p2w5f\" (UID: \"1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7\") " pod="openstack-operators/openstack-operator-index-p2w5f"
Nov 24 08:09:54 crc kubenswrapper[4691]: I1124 08:09:54.893306 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snhfr\" (UniqueName: \"kubernetes.io/projected/1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7-kube-api-access-snhfr\") pod \"openstack-operator-index-p2w5f\" (UID: \"1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7\") " pod="openstack-operators/openstack-operator-index-p2w5f"
Nov 24 08:09:54 crc kubenswrapper[4691]: I1124 08:09:54.913224 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snhfr\" (UniqueName: \"kubernetes.io/projected/1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7-kube-api-access-snhfr\") pod \"openstack-operator-index-p2w5f\" (UID: \"1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7\") " pod="openstack-operators/openstack-operator-index-p2w5f"
Nov 24 08:09:54 crc kubenswrapper[4691]: I1124 08:09:54.969543 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-p2w5f"
Nov 24 08:09:55 crc kubenswrapper[4691]: I1124 08:09:55.205112 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-p2w5f"]
Nov 24 08:09:55 crc kubenswrapper[4691]: W1124 08:09:55.215035 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1e01fb7c_cab7_4a5d_ad17_55d0ef08f9a7.slice/crio-e2b24e98684b23ad11d6f4e09a342eb368dbebda3c36f4963e6b3d464361b1a3 WatchSource:0}: Error finding container e2b24e98684b23ad11d6f4e09a342eb368dbebda3c36f4963e6b3d464361b1a3: Status 404 returned error can't find the container with id e2b24e98684b23ad11d6f4e09a342eb368dbebda3c36f4963e6b3d464361b1a3
Nov 24 08:09:56 crc kubenswrapper[4691]: I1124 08:09:56.221353 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-p2w5f" event={"ID":"1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7","Type":"ContainerStarted","Data":"e2b24e98684b23ad11d6f4e09a342eb368dbebda3c36f4963e6b3d464361b1a3"}
Nov 24 08:09:58 crc kubenswrapper[4691]: I1124 08:09:58.006234 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-p2w5f"]
Nov 24 08:09:58 crc kubenswrapper[4691]: I1124 08:09:58.612619 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-4ppc7"]
Nov 24 08:09:58 crc kubenswrapper[4691]: I1124 08:09:58.613708 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-4ppc7"
Nov 24 08:09:58 crc kubenswrapper[4691]: I1124 08:09:58.622472 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-4ppc7"]
Nov 24 08:09:58 crc kubenswrapper[4691]: I1124 08:09:58.760245 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7x8zd\" (UniqueName: \"kubernetes.io/projected/c57ebb8d-e8cb-4e4a-af63-e79986c327a5-kube-api-access-7x8zd\") pod \"openstack-operator-index-4ppc7\" (UID: \"c57ebb8d-e8cb-4e4a-af63-e79986c327a5\") " pod="openstack-operators/openstack-operator-index-4ppc7"
Nov 24 08:09:58 crc kubenswrapper[4691]: I1124 08:09:58.861870 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7x8zd\" (UniqueName: \"kubernetes.io/projected/c57ebb8d-e8cb-4e4a-af63-e79986c327a5-kube-api-access-7x8zd\") pod \"openstack-operator-index-4ppc7\" (UID: \"c57ebb8d-e8cb-4e4a-af63-e79986c327a5\") " pod="openstack-operators/openstack-operator-index-4ppc7"
Nov 24 08:09:58 crc kubenswrapper[4691]: I1124 08:09:58.884876 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7x8zd\" (UniqueName: \"kubernetes.io/projected/c57ebb8d-e8cb-4e4a-af63-e79986c327a5-kube-api-access-7x8zd\") pod \"openstack-operator-index-4ppc7\" (UID: \"c57ebb8d-e8cb-4e4a-af63-e79986c327a5\") " pod="openstack-operators/openstack-operator-index-4ppc7"
Nov 24 08:09:58 crc kubenswrapper[4691]: I1124 08:09:58.930864 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-4ppc7"
Nov 24 08:09:59 crc kubenswrapper[4691]: I1124 08:09:59.245480 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-p2w5f" event={"ID":"1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7","Type":"ContainerStarted","Data":"c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71"}
Nov 24 08:09:59 crc kubenswrapper[4691]: I1124 08:09:59.245622 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-p2w5f" podUID="1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7" containerName="registry-server" containerID="cri-o://c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71" gracePeriod=2
Nov 24 08:09:59 crc kubenswrapper[4691]: I1124 08:09:59.267996 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-p2w5f" podStartSLOduration=2.283033916 podStartE2EDuration="5.267972351s" podCreationTimestamp="2025-11-24 08:09:54 +0000 UTC" firstStartedPulling="2025-11-24 08:09:55.217555171 +0000 UTC m=+757.216504420" lastFinishedPulling="2025-11-24 08:09:58.202493606 +0000 UTC m=+760.201442855" observedRunningTime="2025-11-24 08:09:59.263600754 +0000 UTC m=+761.262550013" watchObservedRunningTime="2025-11-24 08:09:59.267972351 +0000 UTC m=+761.266921600"
Nov 24 08:09:59 crc kubenswrapper[4691]: I1124 08:09:59.351237 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-4ppc7"]
Nov 24 08:09:59 crc kubenswrapper[4691]: I1124 08:09:59.626499 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-p2w5f"
Nov 24 08:09:59 crc kubenswrapper[4691]: I1124 08:09:59.774239 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snhfr\" (UniqueName: \"kubernetes.io/projected/1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7-kube-api-access-snhfr\") pod \"1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7\" (UID: \"1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7\") "
Nov 24 08:09:59 crc kubenswrapper[4691]: I1124 08:09:59.785931 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7-kube-api-access-snhfr" (OuterVolumeSpecName: "kube-api-access-snhfr") pod "1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7" (UID: "1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7"). InnerVolumeSpecName "kube-api-access-snhfr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:09:59 crc kubenswrapper[4691]: I1124 08:09:59.876765 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snhfr\" (UniqueName: \"kubernetes.io/projected/1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7-kube-api-access-snhfr\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.254121 4691 generic.go:334] "Generic (PLEG): container finished" podID="1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7" containerID="c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71" exitCode=0
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.254191 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-p2w5f" event={"ID":"1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7","Type":"ContainerDied","Data":"c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71"}
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.254270 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-p2w5f" event={"ID":"1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7","Type":"ContainerDied","Data":"e2b24e98684b23ad11d6f4e09a342eb368dbebda3c36f4963e6b3d464361b1a3"}
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.254299 4691 scope.go:117] "RemoveContainer" containerID="c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71"
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.254210 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-p2w5f"
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.257174 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-4ppc7" event={"ID":"c57ebb8d-e8cb-4e4a-af63-e79986c327a5","Type":"ContainerStarted","Data":"2f05f595610baf298d629e1a3c3cbc0553c5f5e1853d1457dc080bf9a21c6e7e"}
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.257222 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-4ppc7" event={"ID":"c57ebb8d-e8cb-4e4a-af63-e79986c327a5","Type":"ContainerStarted","Data":"9d2bdfebd70103ea983a3f3baf3bfa08d104f6e0f8502689c3deb2ed0a1c245b"}
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.279135 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-4ppc7" podStartSLOduration=2.23423871 podStartE2EDuration="2.27910477s" podCreationTimestamp="2025-11-24 08:09:58 +0000 UTC" firstStartedPulling="2025-11-24 08:09:59.363651513 +0000 UTC m=+761.362600762" lastFinishedPulling="2025-11-24 08:09:59.408517573 +0000 UTC m=+761.407466822" observedRunningTime="2025-11-24 08:10:00.274056524 +0000 UTC m=+762.273005783" watchObservedRunningTime="2025-11-24 08:10:00.27910477 +0000 UTC m=+762.278054019"
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.283551 4691 scope.go:117] "RemoveContainer" containerID="c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71"
Nov 24 08:10:00 crc kubenswrapper[4691]: E1124 08:10:00.284092 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71\": container with ID starting with c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71 not found: ID does not exist" containerID="c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71"
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.284132 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71"} err="failed to get container status \"c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71\": rpc error: code = NotFound desc = could not find container \"c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71\": container with ID starting with c0a99ccaba39ffe61df0fabd774429957a3b6e98d45d776b0a257e7527153a71 not found: ID does not exist"
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.300185 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-p2w5f"]
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.304283 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-p2w5f"]
Nov 24 08:10:00 crc kubenswrapper[4691]: I1124 08:10:00.779953 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7" path="/var/lib/kubelet/pods/1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7/volumes"
Nov 24 08:10:08 crc kubenswrapper[4691]: I1124 08:10:08.652945 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p2br"]
Nov 24 08:10:08 crc kubenswrapper[4691]: I1124 08:10:08.653618 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" podUID="53aa0d7f-022e-46a2-9e47-442eca753bbc" containerName="controller-manager" containerID="cri-o://4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21" gracePeriod=30
Nov 24 08:10:08 crc kubenswrapper[4691]: I1124 08:10:08.732510 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj"]
Nov 24 08:10:08 crc kubenswrapper[4691]: I1124 08:10:08.732780 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" podUID="6d32b123-7986-4bd2-abdf-b8be8c855817" containerName="route-controller-manager" containerID="cri-o://cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407" gracePeriod=30
Nov 24 08:10:08 crc kubenswrapper[4691]: I1124 08:10:08.930981 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-4ppc7"
Nov 24 08:10:08 crc kubenswrapper[4691]: I1124 08:10:08.931409 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-4ppc7"
Nov 24 08:10:08 crc kubenswrapper[4691]: I1124 08:10:08.962162 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-4ppc7"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.086839 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.187482 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.212034 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-proxy-ca-bundles\") pod \"53aa0d7f-022e-46a2-9e47-442eca753bbc\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") "
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.212558 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45qj5\" (UniqueName: \"kubernetes.io/projected/53aa0d7f-022e-46a2-9e47-442eca753bbc-kube-api-access-45qj5\") pod \"53aa0d7f-022e-46a2-9e47-442eca753bbc\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") "
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.212654 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-config\") pod \"53aa0d7f-022e-46a2-9e47-442eca753bbc\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") "
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.212797 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-client-ca\") pod \"53aa0d7f-022e-46a2-9e47-442eca753bbc\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") "
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.212819 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53aa0d7f-022e-46a2-9e47-442eca753bbc-serving-cert\") pod \"53aa0d7f-022e-46a2-9e47-442eca753bbc\" (UID: \"53aa0d7f-022e-46a2-9e47-442eca753bbc\") "
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.213124 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "53aa0d7f-022e-46a2-9e47-442eca753bbc" (UID: "53aa0d7f-022e-46a2-9e47-442eca753bbc"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.213220 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-config" (OuterVolumeSpecName: "config") pod "53aa0d7f-022e-46a2-9e47-442eca753bbc" (UID: "53aa0d7f-022e-46a2-9e47-442eca753bbc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.213681 4691 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.213705 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-config\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.213919 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-client-ca" (OuterVolumeSpecName: "client-ca") pod "53aa0d7f-022e-46a2-9e47-442eca753bbc" (UID: "53aa0d7f-022e-46a2-9e47-442eca753bbc"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.220412 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53aa0d7f-022e-46a2-9e47-442eca753bbc-kube-api-access-45qj5" (OuterVolumeSpecName: "kube-api-access-45qj5") pod "53aa0d7f-022e-46a2-9e47-442eca753bbc" (UID: "53aa0d7f-022e-46a2-9e47-442eca753bbc"). InnerVolumeSpecName "kube-api-access-45qj5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.220791 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53aa0d7f-022e-46a2-9e47-442eca753bbc-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "53aa0d7f-022e-46a2-9e47-442eca753bbc" (UID: "53aa0d7f-022e-46a2-9e47-442eca753bbc"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.314679 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d32b123-7986-4bd2-abdf-b8be8c855817-serving-cert\") pod \"6d32b123-7986-4bd2-abdf-b8be8c855817\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") "
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.314797 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-config\") pod \"6d32b123-7986-4bd2-abdf-b8be8c855817\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") "
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.314822 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6sjm\" (UniqueName: \"kubernetes.io/projected/6d32b123-7986-4bd2-abdf-b8be8c855817-kube-api-access-b6sjm\") pod \"6d32b123-7986-4bd2-abdf-b8be8c855817\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") "
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.314843 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-client-ca\") pod \"6d32b123-7986-4bd2-abdf-b8be8c855817\" (UID: \"6d32b123-7986-4bd2-abdf-b8be8c855817\") "
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.315096 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45qj5\" (UniqueName: \"kubernetes.io/projected/53aa0d7f-022e-46a2-9e47-442eca753bbc-kube-api-access-45qj5\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.315112 4691 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/53aa0d7f-022e-46a2-9e47-442eca753bbc-client-ca\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.315120 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53aa0d7f-022e-46a2-9e47-442eca753bbc-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.315679 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-client-ca" (OuterVolumeSpecName: "client-ca") pod "6d32b123-7986-4bd2-abdf-b8be8c855817" (UID: "6d32b123-7986-4bd2-abdf-b8be8c855817"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.315869 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-config" (OuterVolumeSpecName: "config") pod "6d32b123-7986-4bd2-abdf-b8be8c855817" (UID: "6d32b123-7986-4bd2-abdf-b8be8c855817"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.318926 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d32b123-7986-4bd2-abdf-b8be8c855817-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6d32b123-7986-4bd2-abdf-b8be8c855817" (UID: "6d32b123-7986-4bd2-abdf-b8be8c855817"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.320917 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d32b123-7986-4bd2-abdf-b8be8c855817-kube-api-access-b6sjm" (OuterVolumeSpecName: "kube-api-access-b6sjm") pod "6d32b123-7986-4bd2-abdf-b8be8c855817" (UID: "6d32b123-7986-4bd2-abdf-b8be8c855817"). InnerVolumeSpecName "kube-api-access-b6sjm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.331115 4691 generic.go:334] "Generic (PLEG): container finished" podID="6d32b123-7986-4bd2-abdf-b8be8c855817" containerID="cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407" exitCode=0
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.331214 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" event={"ID":"6d32b123-7986-4bd2-abdf-b8be8c855817","Type":"ContainerDied","Data":"cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407"}
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.331218 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.331256 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj" event={"ID":"6d32b123-7986-4bd2-abdf-b8be8c855817","Type":"ContainerDied","Data":"deb8ba6928cbb20c87380a660cddedb7530cce603e31132aecb1352fceadb165"}
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.331278 4691 scope.go:117] "RemoveContainer" containerID="cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.333791 4691 generic.go:334] "Generic (PLEG): container finished" podID="53aa0d7f-022e-46a2-9e47-442eca753bbc" containerID="4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21" exitCode=0
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.333847 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.333905 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" event={"ID":"53aa0d7f-022e-46a2-9e47-442eca753bbc","Type":"ContainerDied","Data":"4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21"}
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.333982 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-7p2br" event={"ID":"53aa0d7f-022e-46a2-9e47-442eca753bbc","Type":"ContainerDied","Data":"f9e05d2cc98059d0803b3f85d142fe15e42b3f82dbe59085c4ee4c3628e5779c"}
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.352600 4691 scope.go:117] "RemoveContainer" containerID="cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407"
Nov 24 08:10:09 crc kubenswrapper[4691]: E1124 08:10:09.353213 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407\": container with ID starting with cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407 not found: ID does not exist" containerID="cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.353278 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407"} err="failed to get container status \"cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407\": rpc error: code = NotFound desc = could not find container \"cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407\": container with ID starting with cda0ce892e23123d7f58a5adaa328c85e0e79b70b03b6d40ac1d3baa53180407 not found: ID does not exist"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.353316 4691 scope.go:117] "RemoveContainer" containerID="4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.380008 4691 scope.go:117] "RemoveContainer" containerID="4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21"
Nov 24 08:10:09 crc kubenswrapper[4691]: E1124 08:10:09.382384 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21\": container with ID starting with 4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21 not found: ID does not exist" containerID="4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.382552 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21"} err="failed to get container status \"4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21\": rpc error: code = NotFound desc = could not find container \"4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21\": container with ID starting with 4808f892e07c0f47db01cc95d7f770e181c0db20cc06db04e7d2b9a5c5a8fc21 not found: ID does not exist"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.382890 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-4ppc7"
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.392457 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p2br"]
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.400589 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-7p2br"]
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.413019 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj"]
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.416275 4691 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d32b123-7986-4bd2-abdf-b8be8c855817-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.416317 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-config\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.416327 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6sjm\" (UniqueName: \"kubernetes.io/projected/6d32b123-7986-4bd2-abdf-b8be8c855817-kube-api-access-b6sjm\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.416338 4691 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6d32b123-7986-4bd2-abdf-b8be8c855817-client-ca\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:09 crc kubenswrapper[4691]: I1124 08:10:09.422143 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zxfcj"]
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.270526 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"]
Nov 24 08:10:10 crc kubenswrapper[4691]: E1124 08:10:10.270884 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7" containerName="registry-server"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.270904 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7" containerName="registry-server"
Nov 24 08:10:10 crc kubenswrapper[4691]: E1124 08:10:10.270931 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d32b123-7986-4bd2-abdf-b8be8c855817" containerName="route-controller-manager"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.270940 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d32b123-7986-4bd2-abdf-b8be8c855817" containerName="route-controller-manager"
Nov 24 08:10:10 crc kubenswrapper[4691]: E1124 08:10:10.270952 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53aa0d7f-022e-46a2-9e47-442eca753bbc" containerName="controller-manager"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.270962 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="53aa0d7f-022e-46a2-9e47-442eca753bbc" containerName="controller-manager"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.271120 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e01fb7c-cab7-4a5d-ad17-55d0ef08f9a7" containerName="registry-server"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.271137 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d32b123-7986-4bd2-abdf-b8be8c855817" containerName="route-controller-manager"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.271146 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="53aa0d7f-022e-46a2-9e47-442eca753bbc" containerName="controller-manager"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.272186 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.274751 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-f7q6q"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.281060 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"]
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.414106 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"]
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.416652 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.417298 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"]
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.418336 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.419273 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.419752 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.420084 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.420240 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.420240 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.420571 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.420747 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.421525 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.421648 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.421631 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.421633 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.421596 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.429416 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.430277 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-bundle\") pod \"0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") " pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.430372 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jzg7\" (UniqueName: \"kubernetes.io/projected/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-kube-api-access-7jzg7\") pod \"0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") " pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.430410 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-util\") pod \"0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") " pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.440750 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"]
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.447102 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"]
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531646 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/57581964-bce3-40d8-adb8-232887e287cb-serving-cert\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531706 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jzg7\" (UniqueName: \"kubernetes.io/projected/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-kube-api-access-7jzg7\") pod \"0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") " pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531746 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-util\") pod \"0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") " pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531780 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/89bf389a-9986-497d-b933-be8274c33271-client-ca\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531812 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/57581964-bce3-40d8-adb8-232887e287cb-client-ca\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531845 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57581964-bce3-40d8-adb8-232887e287cb-config\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531876 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/89bf389a-9986-497d-b933-be8274c33271-proxy-ca-bundles\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531897 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-bundle\") pod \"0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") " pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531921 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89bf389a-9986-497d-b933-be8274c33271-config\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531941 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f44td\" (UniqueName: \"kubernetes.io/projected/89bf389a-9986-497d-b933-be8274c33271-kube-api-access-f44td\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531974 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89bf389a-9986-497d-b933-be8274c33271-serving-cert\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.531992 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srsbb\" (UniqueName: \"kubernetes.io/projected/57581964-bce3-40d8-adb8-232887e287cb-kube-api-access-srsbb\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.532605 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-util\") pod \"0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") " pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.532649 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-bundle\") pod \"0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") " pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.549621 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jzg7\" (UniqueName: \"kubernetes.io/projected/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-kube-api-access-7jzg7\") pod \"0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") " pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.629356 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.633016 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/57581964-bce3-40d8-adb8-232887e287cb-serving-cert\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.633810 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/89bf389a-9986-497d-b933-be8274c33271-client-ca\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.634078 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/57581964-bce3-40d8-adb8-232887e287cb-client-ca\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.634122 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57581964-bce3-40d8-adb8-232887e287cb-config\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.634160 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/89bf389a-9986-497d-b933-be8274c33271-proxy-ca-bundles\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.634382 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89bf389a-9986-497d-b933-be8274c33271-config\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.634417 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f44td\" (UniqueName: \"kubernetes.io/projected/89bf389a-9986-497d-b933-be8274c33271-kube-api-access-f44td\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.634640 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89bf389a-9986-497d-b933-be8274c33271-serving-cert\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.634674 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srsbb\" (UniqueName: \"kubernetes.io/projected/57581964-bce3-40d8-adb8-232887e287cb-kube-api-access-srsbb\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.635205 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/89bf389a-9986-497d-b933-be8274c33271-client-ca\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.636193 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/89bf389a-9986-497d-b933-be8274c33271-proxy-ca-bundles\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.636420 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/57581964-bce3-40d8-adb8-232887e287cb-client-ca\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.636524 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89bf389a-9986-497d-b933-be8274c33271-config\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.636866 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57581964-bce3-40d8-adb8-232887e287cb-config\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.642167 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/57581964-bce3-40d8-adb8-232887e287cb-serving-cert\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.649965 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89bf389a-9986-497d-b933-be8274c33271-serving-cert\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.656037 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srsbb\" (UniqueName: \"kubernetes.io/projected/57581964-bce3-40d8-adb8-232887e287cb-kube-api-access-srsbb\") pod \"route-controller-manager-59d44967dc-4sg9t\" (UID: \"57581964-bce3-40d8-adb8-232887e287cb\") " pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.661392 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f44td\" (UniqueName: \"kubernetes.io/projected/89bf389a-9986-497d-b933-be8274c33271-kube-api-access-f44td\") pod \"controller-manager-7c9dc89848-fmcxj\" (UID: \"89bf389a-9986-497d-b933-be8274c33271\") " pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.741700 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.751652 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.786805 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53aa0d7f-022e-46a2-9e47-442eca753bbc" path="/var/lib/kubelet/pods/53aa0d7f-022e-46a2-9e47-442eca753bbc/volumes"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.787518 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d32b123-7986-4bd2-abdf-b8be8c855817" path="/var/lib/kubelet/pods/6d32b123-7986-4bd2-abdf-b8be8c855817/volumes"
Nov 24 08:10:10 crc kubenswrapper[4691]: I1124 08:10:10.904538 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"]
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.081289 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"]
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.230697 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"]
Nov 24 08:10:11 crc kubenswrapper[4691]: W1124 08:10:11.242620 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57581964_bce3_40d8_adb8_232887e287cb.slice/crio-b5be556a620b0b6d96704aacca09c4d0f6355912a84242a8dca81d51c5f07cae WatchSource:0}: Error finding container b5be556a620b0b6d96704aacca09c4d0f6355912a84242a8dca81d51c5f07cae: Status 404 returned error can't find the container with id b5be556a620b0b6d96704aacca09c4d0f6355912a84242a8dca81d51c5f07cae
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.358545 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj" event={"ID":"89bf389a-9986-497d-b933-be8274c33271","Type":"ContainerStarted","Data":"0417d50be1da4d40f3c663e2e8220b7dea81c568b36bcfd9338fd1908745e39e"}
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.358610 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj" event={"ID":"89bf389a-9986-497d-b933-be8274c33271","Type":"ContainerStarted","Data":"1927d7e5a9e7168d734f6bc0adedc777887941f43bfb314f2e1515d3da76e270"}
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.358816 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.360212 4691 patch_prober.go:28] interesting pod/controller-manager-7c9dc89848-fmcxj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.66:8443/healthz\": dial tcp 10.217.0.66:8443: connect: connection refused" start-of-body=
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.360267 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj" podUID="89bf389a-9986-497d-b933-be8274c33271" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.66:8443/healthz\": dial tcp 10.217.0.66:8443: connect: connection refused"
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.362533 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t" event={"ID":"57581964-bce3-40d8-adb8-232887e287cb","Type":"ContainerStarted","Data":"b5be556a620b0b6d96704aacca09c4d0f6355912a84242a8dca81d51c5f07cae"}
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.365309 4691 generic.go:334] "Generic (PLEG): container finished" podID="d08a87e6-8ffb-408e-86d2-ff4994f07ed9" containerID="3aedb4d98ed91a9fa20c70d90e16c1be9c9bfcded3cfca22589958ddcd7a1f4a" exitCode=0
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.365361 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw" event={"ID":"d08a87e6-8ffb-408e-86d2-ff4994f07ed9","Type":"ContainerDied","Data":"3aedb4d98ed91a9fa20c70d90e16c1be9c9bfcded3cfca22589958ddcd7a1f4a"}
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.365393 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw" event={"ID":"d08a87e6-8ffb-408e-86d2-ff4994f07ed9","Type":"ContainerStarted","Data":"1bea518f412866caf3cb94cbd4de9d8d0587ffcd3c4d9de47c038d72b94a0fea"}
Nov 24 08:10:11 crc kubenswrapper[4691]: I1124 08:10:11.380426 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj" podStartSLOduration=3.380400947 podStartE2EDuration="3.380400947s" podCreationTimestamp="2025-11-24 08:10:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:10:11.376310588 +0000 UTC m=+773.375259847" watchObservedRunningTime="2025-11-24 08:10:11.380400947 +0000 UTC m=+773.379350196"
Nov 24 08:10:12 crc kubenswrapper[4691]: I1124 08:10:12.376373 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t" event={"ID":"57581964-bce3-40d8-adb8-232887e287cb","Type":"ContainerStarted","Data":"bd0a3b1409e068e8a9dc81649841ece01f53075b11d9eadefbad29b79a789138"}
Nov 24 08:10:12 crc kubenswrapper[4691]: I1124 08:10:12.378389 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:12 crc kubenswrapper[4691]: I1124 08:10:12.381140 4691 generic.go:334] "Generic (PLEG): container finished" podID="d08a87e6-8ffb-408e-86d2-ff4994f07ed9" containerID="88ebe4e979e763475c33ba579c46fdd308a79ec2d2680b340876fb8893d50687" exitCode=0
Nov 24 08:10:12 crc kubenswrapper[4691]: I1124 08:10:12.381225 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw" event={"ID":"d08a87e6-8ffb-408e-86d2-ff4994f07ed9","Type":"ContainerDied","Data":"88ebe4e979e763475c33ba579c46fdd308a79ec2d2680b340876fb8893d50687"}
Nov 24 08:10:12 crc kubenswrapper[4691]: I1124 08:10:12.388174 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t"
Nov 24 08:10:12 crc kubenswrapper[4691]: I1124 08:10:12.388296 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7c9dc89848-fmcxj"
Nov 24 08:10:12 crc kubenswrapper[4691]: I1124 08:10:12.429087 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-59d44967dc-4sg9t" podStartSLOduration=4.429056844 podStartE2EDuration="4.429056844s" podCreationTimestamp="2025-11-24 08:10:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:10:12.406092859 +0000 UTC m=+774.405042148" watchObservedRunningTime="2025-11-24 08:10:12.429056844 +0000 UTC m=+774.428006143"
Nov 24 08:10:13 crc kubenswrapper[4691]: I1124 08:10:13.392053 4691 generic.go:334] "Generic (PLEG): container finished" podID="d08a87e6-8ffb-408e-86d2-ff4994f07ed9" containerID="f617a495fabb7a1269ca54d67c310c604e053758a0c3a1e3140a28fa61f764a1" exitCode=0
Nov 24 08:10:13 crc kubenswrapper[4691]: I1124 08:10:13.392096 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw" event={"ID":"d08a87e6-8ffb-408e-86d2-ff4994f07ed9","Type":"ContainerDied","Data":"f617a495fabb7a1269ca54d67c310c604e053758a0c3a1e3140a28fa61f764a1"}
Nov 24 08:10:14 crc kubenswrapper[4691]: I1124 08:10:14.764605 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:14 crc kubenswrapper[4691]: I1124 08:10:14.917011 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-util\") pod \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") "
Nov 24 08:10:14 crc kubenswrapper[4691]: I1124 08:10:14.917155 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-bundle\") pod \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") "
Nov 24 08:10:14 crc kubenswrapper[4691]: I1124 08:10:14.917257 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jzg7\" (UniqueName: \"kubernetes.io/projected/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-kube-api-access-7jzg7\") pod \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\" (UID: \"d08a87e6-8ffb-408e-86d2-ff4994f07ed9\") "
Nov 24 08:10:14 crc kubenswrapper[4691]: I1124 08:10:14.918289 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-bundle" (OuterVolumeSpecName: "bundle") pod "d08a87e6-8ffb-408e-86d2-ff4994f07ed9" (UID: "d08a87e6-8ffb-408e-86d2-ff4994f07ed9"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:10:14 crc kubenswrapper[4691]: I1124 08:10:14.923822 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-kube-api-access-7jzg7" (OuterVolumeSpecName: "kube-api-access-7jzg7") pod "d08a87e6-8ffb-408e-86d2-ff4994f07ed9" (UID: "d08a87e6-8ffb-408e-86d2-ff4994f07ed9"). InnerVolumeSpecName "kube-api-access-7jzg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:10:14 crc kubenswrapper[4691]: I1124 08:10:14.936527 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-util" (OuterVolumeSpecName: "util") pod "d08a87e6-8ffb-408e-86d2-ff4994f07ed9" (UID: "d08a87e6-8ffb-408e-86d2-ff4994f07ed9"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:10:15 crc kubenswrapper[4691]: I1124 08:10:15.019079 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jzg7\" (UniqueName: \"kubernetes.io/projected/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-kube-api-access-7jzg7\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:15 crc kubenswrapper[4691]: I1124 08:10:15.019128 4691 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-util\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:15 crc kubenswrapper[4691]: I1124 08:10:15.019146 4691 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d08a87e6-8ffb-408e-86d2-ff4994f07ed9-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 08:10:15 crc kubenswrapper[4691]: I1124 08:10:15.343210 4691 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 24 08:10:15 crc kubenswrapper[4691]: I1124 08:10:15.408481 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw" event={"ID":"d08a87e6-8ffb-408e-86d2-ff4994f07ed9","Type":"ContainerDied","Data":"1bea518f412866caf3cb94cbd4de9d8d0587ffcd3c4d9de47c038d72b94a0fea"}
Nov 24 08:10:15 crc kubenswrapper[4691]: I1124 08:10:15.408529 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1bea518f412866caf3cb94cbd4de9d8d0587ffcd3c4d9de47c038d72b94a0fea"
Nov 24 08:10:15 crc kubenswrapper[4691]: I1124 08:10:15.408922 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw"
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.467719 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn"]
Nov 24 08:10:17 crc kubenswrapper[4691]: E1124 08:10:17.468382 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d08a87e6-8ffb-408e-86d2-ff4994f07ed9" containerName="extract"
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.468399 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d08a87e6-8ffb-408e-86d2-ff4994f07ed9" containerName="extract"
Nov 24 08:10:17 crc kubenswrapper[4691]: E1124 08:10:17.468430 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d08a87e6-8ffb-408e-86d2-ff4994f07ed9" containerName="util"
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.468457 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d08a87e6-8ffb-408e-86d2-ff4994f07ed9" containerName="util"
Nov 24 08:10:17 crc kubenswrapper[4691]: E1124 08:10:17.468470 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d08a87e6-8ffb-408e-86d2-ff4994f07ed9" containerName="pull"
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.468478 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d08a87e6-8ffb-408e-86d2-ff4994f07ed9" containerName="pull"
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.468606 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d08a87e6-8ffb-408e-86d2-ff4994f07ed9" containerName="extract"
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.469106 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn"
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.474062 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-rbnw7"
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.508597 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn"]
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.655265 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzrrz\" (UniqueName: \"kubernetes.io/projected/2aa0febc-e96d-419c-855c-bae0db1c6d11-kube-api-access-nzrrz\") pod \"openstack-operator-controller-operator-54cb99d74c-jrmkn\" (UID: \"2aa0febc-e96d-419c-855c-bae0db1c6d11\") " pod="openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn"
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.756570 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzrrz\" (UniqueName: \"kubernetes.io/projected/2aa0febc-e96d-419c-855c-bae0db1c6d11-kube-api-access-nzrrz\") pod \"openstack-operator-controller-operator-54cb99d74c-jrmkn\" (UID: \"2aa0febc-e96d-419c-855c-bae0db1c6d11\") " pod="openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn"
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.777001 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzrrz\" (UniqueName: \"kubernetes.io/projected/2aa0febc-e96d-419c-855c-bae0db1c6d11-kube-api-access-nzrrz\") pod \"openstack-operator-controller-operator-54cb99d74c-jrmkn\" (UID: \"2aa0febc-e96d-419c-855c-bae0db1c6d11\") " pod="openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn"
Nov 24 08:10:17 crc kubenswrapper[4691]: I1124 08:10:17.788366 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn" Nov 24 08:10:18 crc kubenswrapper[4691]: I1124 08:10:18.294780 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn"] Nov 24 08:10:18 crc kubenswrapper[4691]: I1124 08:10:18.432955 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn" event={"ID":"2aa0febc-e96d-419c-855c-bae0db1c6d11","Type":"ContainerStarted","Data":"bead2e6b073eabcfeb9e0ef649b418cfcb1e85ee415be228127dfb1db7c0751a"} Nov 24 08:10:21 crc kubenswrapper[4691]: I1124 08:10:21.089738 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:10:21 crc kubenswrapper[4691]: I1124 08:10:21.090845 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:10:21 crc kubenswrapper[4691]: I1124 08:10:21.090916 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:10:21 crc kubenswrapper[4691]: I1124 08:10:21.091677 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4adbbde14ca91fb132e770900c2c7d789c1a43897b472649dcf3666cd980576b"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 08:10:21 crc kubenswrapper[4691]: I1124 08:10:21.091737 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://4adbbde14ca91fb132e770900c2c7d789c1a43897b472649dcf3666cd980576b" gracePeriod=600 Nov 24 08:10:21 crc kubenswrapper[4691]: I1124 08:10:21.463654 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="4adbbde14ca91fb132e770900c2c7d789c1a43897b472649dcf3666cd980576b" exitCode=0 Nov 24 08:10:21 crc kubenswrapper[4691]: I1124 08:10:21.463710 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"4adbbde14ca91fb132e770900c2c7d789c1a43897b472649dcf3666cd980576b"} Nov 24 08:10:21 crc kubenswrapper[4691]: I1124 08:10:21.463786 4691 scope.go:117] "RemoveContainer" containerID="2f66d4866e1d53d3e1351796c2aebb19bcfe0badbd5b9c37eb4d97650922dfd8" Nov 24 08:10:22 crc kubenswrapper[4691]: I1124 08:10:22.474827 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"8d580292dc3a8a86e61ece515d1a697fe0192e1bffaa2352b8d538c10b88fced"} Nov 
24 08:10:22 crc kubenswrapper[4691]: I1124 08:10:22.476520 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn" event={"ID":"2aa0febc-e96d-419c-855c-bae0db1c6d11","Type":"ContainerStarted","Data":"fec604899be15c66e52384295619801f06dad83dbd3994b885b31040eddc672a"} Nov 24 08:10:22 crc kubenswrapper[4691]: I1124 08:10:22.476643 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn" Nov 24 08:10:22 crc kubenswrapper[4691]: I1124 08:10:22.530817 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn" podStartSLOduration=1.66309928 podStartE2EDuration="5.530797626s" podCreationTimestamp="2025-11-24 08:10:17 +0000 UTC" firstStartedPulling="2025-11-24 08:10:18.309616507 +0000 UTC m=+780.308565756" lastFinishedPulling="2025-11-24 08:10:22.177314843 +0000 UTC m=+784.176264102" observedRunningTime="2025-11-24 08:10:22.529296042 +0000 UTC m=+784.528245311" watchObservedRunningTime="2025-11-24 08:10:22.530797626 +0000 UTC m=+784.529746875" Nov 24 08:10:27 crc kubenswrapper[4691]: I1124 08:10:27.792245 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-54cb99d74c-jrmkn" Nov 24 08:10:33 crc kubenswrapper[4691]: I1124 08:10:33.892820 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bb2l8"] Nov 24 08:10:33 crc kubenswrapper[4691]: I1124 08:10:33.895553 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:33 crc kubenswrapper[4691]: I1124 08:10:33.936305 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bb2l8"] Nov 24 08:10:34 crc kubenswrapper[4691]: I1124 08:10:34.005498 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-utilities\") pod \"redhat-operators-bb2l8\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:34 crc kubenswrapper[4691]: I1124 08:10:34.005563 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkrxc\" (UniqueName: \"kubernetes.io/projected/29cbcb95-d722-413b-893c-f04bdb5ea301-kube-api-access-nkrxc\") pod \"redhat-operators-bb2l8\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:34 crc kubenswrapper[4691]: I1124 08:10:34.005594 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-catalog-content\") pod \"redhat-operators-bb2l8\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:34 crc kubenswrapper[4691]: I1124 08:10:34.107485 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-utilities\") pod \"redhat-operators-bb2l8\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " 
pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:34 crc kubenswrapper[4691]: I1124 08:10:34.107541 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkrxc\" (UniqueName: \"kubernetes.io/projected/29cbcb95-d722-413b-893c-f04bdb5ea301-kube-api-access-nkrxc\") pod \"redhat-operators-bb2l8\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:34 crc kubenswrapper[4691]: I1124 08:10:34.107573 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-catalog-content\") pod \"redhat-operators-bb2l8\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:34 crc kubenswrapper[4691]: I1124 08:10:34.108198 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-utilities\") pod \"redhat-operators-bb2l8\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:34 crc kubenswrapper[4691]: I1124 08:10:34.108250 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-catalog-content\") pod \"redhat-operators-bb2l8\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:34 crc kubenswrapper[4691]: I1124 08:10:34.149737 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkrxc\" (UniqueName: \"kubernetes.io/projected/29cbcb95-d722-413b-893c-f04bdb5ea301-kube-api-access-nkrxc\") pod \"redhat-operators-bb2l8\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:34 crc kubenswrapper[4691]: I1124 08:10:34.215381 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:34 crc kubenswrapper[4691]: I1124 08:10:34.790940 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bb2l8"] Nov 24 08:10:34 crc kubenswrapper[4691]: W1124 08:10:34.813064 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod29cbcb95_d722_413b_893c_f04bdb5ea301.slice/crio-fc6f59b0006f3ed0c834a9fede06f174f2f11e9cd58e0391e5c3213f50ad652f WatchSource:0}: Error finding container fc6f59b0006f3ed0c834a9fede06f174f2f11e9cd58e0391e5c3213f50ad652f: Status 404 returned error can't find the container with id fc6f59b0006f3ed0c834a9fede06f174f2f11e9cd58e0391e5c3213f50ad652f Nov 24 08:10:35 crc kubenswrapper[4691]: I1124 08:10:35.574692 4691 generic.go:334] "Generic (PLEG): container finished" podID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerID="f5f3da8ad5456d8ce68bb72db1ba882f4fc6fe80ea58175bf692c1bd7f4bcecd" exitCode=0 Nov 24 08:10:35 crc kubenswrapper[4691]: I1124 08:10:35.574933 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bb2l8" event={"ID":"29cbcb95-d722-413b-893c-f04bdb5ea301","Type":"ContainerDied","Data":"f5f3da8ad5456d8ce68bb72db1ba882f4fc6fe80ea58175bf692c1bd7f4bcecd"} Nov 24 08:10:35 crc kubenswrapper[4691]: I1124 08:10:35.574959 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bb2l8" event={"ID":"29cbcb95-d722-413b-893c-f04bdb5ea301","Type":"ContainerStarted","Data":"fc6f59b0006f3ed0c834a9fede06f174f2f11e9cd58e0391e5c3213f50ad652f"} Nov 24 08:10:36 crc kubenswrapper[4691]: I1124 08:10:36.585481 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bb2l8" event={"ID":"29cbcb95-d722-413b-893c-f04bdb5ea301","Type":"ContainerStarted","Data":"19e90b80868244715aee6f90bf45117ab1b221de14ccfd13240b9eb9fb36d3d2"} Nov 24 08:10:37 crc kubenswrapper[4691]: I1124 08:10:37.595742 4691 generic.go:334] "Generic (PLEG): container finished" podID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerID="19e90b80868244715aee6f90bf45117ab1b221de14ccfd13240b9eb9fb36d3d2" exitCode=0 Nov 24 08:10:37 crc kubenswrapper[4691]: I1124 08:10:37.595800 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bb2l8" event={"ID":"29cbcb95-d722-413b-893c-f04bdb5ea301","Type":"ContainerDied","Data":"19e90b80868244715aee6f90bf45117ab1b221de14ccfd13240b9eb9fb36d3d2"} Nov 24 08:10:38 crc kubenswrapper[4691]: I1124 08:10:38.606327 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bb2l8" event={"ID":"29cbcb95-d722-413b-893c-f04bdb5ea301","Type":"ContainerStarted","Data":"0eb39cdc698157e16739daa225e56d9787c696c5655714a544c8719dc1bd64d5"} Nov 24 08:10:38 crc kubenswrapper[4691]: I1124 08:10:38.629610 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bb2l8" podStartSLOduration=3.103046182 podStartE2EDuration="5.629590655s" podCreationTimestamp="2025-11-24 08:10:33 +0000 UTC" firstStartedPulling="2025-11-24 08:10:35.576559006 +0000 UTC m=+797.575508255" lastFinishedPulling="2025-11-24 08:10:38.103103479 +0000 UTC m=+800.102052728" observedRunningTime="2025-11-24 08:10:38.625154277 +0000 UTC m=+800.624103526" watchObservedRunningTime="2025-11-24 08:10:38.629590655 +0000 UTC m=+800.628539904" Nov 24 08:10:39 crc 
kubenswrapper[4691]: I1124 08:10:39.874535 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-st4t6"] Nov 24 08:10:39 crc kubenswrapper[4691]: I1124 08:10:39.876149 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:39 crc kubenswrapper[4691]: I1124 08:10:39.896185 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-st4t6"] Nov 24 08:10:39 crc kubenswrapper[4691]: I1124 08:10:39.997993 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs2nv\" (UniqueName: \"kubernetes.io/projected/c5492776-03c1-422f-8097-e69c5fbf459b-kube-api-access-xs2nv\") pod \"certified-operators-st4t6\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:39 crc kubenswrapper[4691]: I1124 08:10:39.998085 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-catalog-content\") pod \"certified-operators-st4t6\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:39 crc kubenswrapper[4691]: I1124 08:10:39.998128 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-utilities\") pod \"certified-operators-st4t6\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:40 crc kubenswrapper[4691]: I1124 08:10:40.099255 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xs2nv\" (UniqueName: \"kubernetes.io/projected/c5492776-03c1-422f-8097-e69c5fbf459b-kube-api-access-xs2nv\") pod \"certified-operators-st4t6\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:40 crc kubenswrapper[4691]: I1124 08:10:40.099354 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-catalog-content\") pod \"certified-operators-st4t6\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:40 crc kubenswrapper[4691]: I1124 08:10:40.099402 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-utilities\") pod \"certified-operators-st4t6\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:40 crc kubenswrapper[4691]: I1124 08:10:40.100045 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-catalog-content\") pod \"certified-operators-st4t6\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:40 crc kubenswrapper[4691]: I1124 08:10:40.100087 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-utilities\") pod \"certified-operators-st4t6\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:40 crc kubenswrapper[4691]: I1124 08:10:40.140692 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xs2nv\" (UniqueName: \"kubernetes.io/projected/c5492776-03c1-422f-8097-e69c5fbf459b-kube-api-access-xs2nv\") pod \"certified-operators-st4t6\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:40 crc kubenswrapper[4691]: I1124 08:10:40.194437 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:40 crc kubenswrapper[4691]: I1124 08:10:40.750979 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-st4t6"] Nov 24 08:10:41 crc kubenswrapper[4691]: I1124 08:10:41.628557 4691 generic.go:334] "Generic (PLEG): container finished" podID="c5492776-03c1-422f-8097-e69c5fbf459b" containerID="a8deeb1991762b71afa08ebe2318b831294eee56d96dfcf5ea05d955fee0e9cd" exitCode=0 Nov 24 08:10:41 crc kubenswrapper[4691]: I1124 08:10:41.628651 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4t6" event={"ID":"c5492776-03c1-422f-8097-e69c5fbf459b","Type":"ContainerDied","Data":"a8deeb1991762b71afa08ebe2318b831294eee56d96dfcf5ea05d955fee0e9cd"} Nov 24 08:10:41 crc kubenswrapper[4691]: I1124 08:10:41.629070 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4t6" event={"ID":"c5492776-03c1-422f-8097-e69c5fbf459b","Type":"ContainerStarted","Data":"b75e45a11993753ae0c8af0603f175e7d4cfef6ed51c4cba054cbea746c2edd6"} Nov 24 08:10:42 crc kubenswrapper[4691]: I1124 08:10:42.637166 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4t6" event={"ID":"c5492776-03c1-422f-8097-e69c5fbf459b","Type":"ContainerStarted","Data":"7089cb8f3da2e0988ca4e9f8a67ab76a25791ace26453b05003cd9f52129ef7a"} Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.001370 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.002928 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.006916 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-hx8bg" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.029102 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.030285 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.036298 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-tpx26" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.038543 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.040276 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.043969 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.050991 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-rhbnk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.071490 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.072724 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.075235 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-8bgsc" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.101395 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.104164 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.128822 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.143113 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-cql69"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.144704 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-cql69" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.148516 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-js7cb" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.150970 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbdmt\" (UniqueName: \"kubernetes.io/projected/132ed997-05f1-4484-a11a-3e282b0e889b-kube-api-access-sbdmt\") pod \"cinder-operator-controller-manager-79856dc55c-6jgx4\" (UID: \"132ed997-05f1-4484-a11a-3e282b0e889b\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.151119 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4nrb\" (UniqueName: \"kubernetes.io/projected/24f62db2-c526-493e-a703-43a661ea0228-kube-api-access-h4nrb\") pod \"designate-operator-controller-manager-7d695c9b56-ncq2x\" (UID: \"24f62db2-c526-493e-a703-43a661ea0228\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.151554 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8wbm\" (UniqueName: \"kubernetes.io/projected/22fec998-136d-4bc0-9db1-1e4ac6e1107c-kube-api-access-k8wbm\") pod \"barbican-operator-controller-manager-86dc4d89c8-ppdhs\" (UID: \"22fec998-136d-4bc0-9db1-1e4ac6e1107c\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.163518 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-cql69"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.202638 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.204214 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.207006 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-nxhtc" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.207463 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.208770 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.212647 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.221614 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.224423 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.227110 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-wmw4h" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.239932 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-j9q97" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.240159 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.253278 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbdmt\" (UniqueName: \"kubernetes.io/projected/132ed997-05f1-4484-a11a-3e282b0e889b-kube-api-access-sbdmt\") pod \"cinder-operator-controller-manager-79856dc55c-6jgx4\" (UID: \"132ed997-05f1-4484-a11a-3e282b0e889b\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.253319 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4nrb\" (UniqueName: \"kubernetes.io/projected/24f62db2-c526-493e-a703-43a661ea0228-kube-api-access-h4nrb\") pod \"designate-operator-controller-manager-7d695c9b56-ncq2x\" (UID: \"24f62db2-c526-493e-a703-43a661ea0228\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.253346 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8wbm\" (UniqueName: \"kubernetes.io/projected/22fec998-136d-4bc0-9db1-1e4ac6e1107c-kube-api-access-k8wbm\") pod \"barbican-operator-controller-manager-86dc4d89c8-ppdhs\" (UID: \"22fec998-136d-4bc0-9db1-1e4ac6e1107c\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.253371 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lbqp\" (UniqueName: \"kubernetes.io/projected/bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e-kube-api-access-8lbqp\") pod \"glance-operator-controller-manager-68b95954c9-f7g9v\" (UID: \"bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.253396 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d985q\" (UniqueName: \"kubernetes.io/projected/df3746c8-ec8b-406e-b2f5-7bd93dd46646-kube-api-access-d985q\") pod \"heat-operator-controller-manager-774b86978c-cql69\" (UID: \"df3746c8-ec8b-406e-b2f5-7bd93dd46646\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-cql69" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.253354 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.254981 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.259466 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.272786 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-mdkfq" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.343897 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8wbm\" (UniqueName: \"kubernetes.io/projected/22fec998-136d-4bc0-9db1-1e4ac6e1107c-kube-api-access-k8wbm\") pod \"barbican-operator-controller-manager-86dc4d89c8-ppdhs\" (UID: \"22fec998-136d-4bc0-9db1-1e4ac6e1107c\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.344282 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbdmt\" (UniqueName: \"kubernetes.io/projected/132ed997-05f1-4484-a11a-3e282b0e889b-kube-api-access-sbdmt\") pod \"cinder-operator-controller-manager-79856dc55c-6jgx4\" (UID: \"132ed997-05f1-4484-a11a-3e282b0e889b\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.355130 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vls42\" (UniqueName: \"kubernetes.io/projected/f8a9119f-fc7e-4bb6-89da-91f7655c633d-kube-api-access-vls42\") pod \"ironic-operator-controller-manager-5bfcdc958c-nfx6g\" (UID: \"f8a9119f-fc7e-4bb6-89da-91f7655c633d\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.356600 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfz7j\" (UniqueName: \"kubernetes.io/projected/be284da4-49c2-4967-a810-eb5dbece93a3-kube-api-access-dfz7j\") pod \"keystone-operator-controller-manager-748dc6576f-2w275\" (UID: \"be284da4-49c2-4967-a810-eb5dbece93a3\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.356642 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbjv7\" (UniqueName: \"kubernetes.io/projected/7e82629b-ee44-488b-bdd3-58f078070f7e-kube-api-access-zbjv7\") pod \"infra-operator-controller-manager-d5cc86f4b-v7vtk\" (UID: \"7e82629b-ee44-488b-bdd3-58f078070f7e\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.356712 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lbqp\" (UniqueName: \"kubernetes.io/projected/bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e-kube-api-access-8lbqp\") pod \"glance-operator-controller-manager-68b95954c9-f7g9v\" (UID: \"bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.356744 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d985q\" (UniqueName: 
\"kubernetes.io/projected/df3746c8-ec8b-406e-b2f5-7bd93dd46646-kube-api-access-d985q\") pod \"heat-operator-controller-manager-774b86978c-cql69\" (UID: \"df3746c8-ec8b-406e-b2f5-7bd93dd46646\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-cql69" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.357288 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxldp\" (UniqueName: \"kubernetes.io/projected/39df322c-3527-4b0d-a719-4ecbfa944a56-kube-api-access-nxldp\") pod \"horizon-operator-controller-manager-68c9694994-jctfk\" (UID: \"39df322c-3527-4b0d-a719-4ecbfa944a56\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.357335 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e82629b-ee44-488b-bdd3-58f078070f7e-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-v7vtk\" (UID: \"7e82629b-ee44-488b-bdd3-58f078070f7e\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.361139 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.364264 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.365235 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4nrb\" (UniqueName: \"kubernetes.io/projected/24f62db2-c526-493e-a703-43a661ea0228-kube-api-access-h4nrb\") pod \"designate-operator-controller-manager-7d695c9b56-ncq2x\" (UID: \"24f62db2-c526-493e-a703-43a661ea0228\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.368482 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.380322 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-979xc" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.423810 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.430280 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d985q\" (UniqueName: \"kubernetes.io/projected/df3746c8-ec8b-406e-b2f5-7bd93dd46646-kube-api-access-d985q\") pod \"heat-operator-controller-manager-774b86978c-cql69\" (UID: \"df3746c8-ec8b-406e-b2f5-7bd93dd46646\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-cql69" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.440671 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.445144 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lbqp\" (UniqueName: \"kubernetes.io/projected/bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e-kube-api-access-8lbqp\") pod \"glance-operator-controller-manager-68b95954c9-f7g9v\" (UID: \"bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.466557 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.467573 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxldp\" (UniqueName: \"kubernetes.io/projected/39df322c-3527-4b0d-a719-4ecbfa944a56-kube-api-access-nxldp\") pod \"horizon-operator-controller-manager-68c9694994-jctfk\" (UID: \"39df322c-3527-4b0d-a719-4ecbfa944a56\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.467610 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e82629b-ee44-488b-bdd3-58f078070f7e-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-v7vtk\" (UID: \"7e82629b-ee44-488b-bdd3-58f078070f7e\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.467642 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vsvn\" (UniqueName: \"kubernetes.io/projected/c2acb14d-547e-4528-addc-5bb388370b04-kube-api-access-7vsvn\") pod \"manila-operator-controller-manager-58bb8d67cc-clqqr\" (UID: \"c2acb14d-547e-4528-addc-5bb388370b04\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.467698 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfz7j\" (UniqueName: \"kubernetes.io/projected/be284da4-49c2-4967-a810-eb5dbece93a3-kube-api-access-dfz7j\") pod \"keystone-operator-controller-manager-748dc6576f-2w275\" (UID: \"be284da4-49c2-4967-a810-eb5dbece93a3\") " 
pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.467716 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vls42\" (UniqueName: \"kubernetes.io/projected/f8a9119f-fc7e-4bb6-89da-91f7655c633d-kube-api-access-vls42\") pod \"ironic-operator-controller-manager-5bfcdc958c-nfx6g\" (UID: \"f8a9119f-fc7e-4bb6-89da-91f7655c633d\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.467743 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbjv7\" (UniqueName: \"kubernetes.io/projected/7e82629b-ee44-488b-bdd3-58f078070f7e-kube-api-access-zbjv7\") pod \"infra-operator-controller-manager-d5cc86f4b-v7vtk\" (UID: \"7e82629b-ee44-488b-bdd3-58f078070f7e\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.469858 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-cql69" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.473079 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e82629b-ee44-488b-bdd3-58f078070f7e-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-v7vtk\" (UID: \"7e82629b-ee44-488b-bdd3-58f078070f7e\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.483701 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.485039 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.516562 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-6ttnm" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.521540 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.523130 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.523259 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.525967 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbjv7\" (UniqueName: \"kubernetes.io/projected/7e82629b-ee44-488b-bdd3-58f078070f7e-kube-api-access-zbjv7\") pod \"infra-operator-controller-manager-d5cc86f4b-v7vtk\" (UID: \"7e82629b-ee44-488b-bdd3-58f078070f7e\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.533459 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.534078 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vls42\" (UniqueName: \"kubernetes.io/projected/f8a9119f-fc7e-4bb6-89da-91f7655c633d-kube-api-access-vls42\") pod \"ironic-operator-controller-manager-5bfcdc958c-nfx6g\" (UID: \"f8a9119f-fc7e-4bb6-89da-91f7655c633d\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.549062 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-47lxp" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.550362 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxldp\" (UniqueName: \"kubernetes.io/projected/39df322c-3527-4b0d-a719-4ecbfa944a56-kube-api-access-nxldp\") pod \"horizon-operator-controller-manager-68c9694994-jctfk\" (UID: \"39df322c-3527-4b0d-a719-4ecbfa944a56\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.556918 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.564370 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.566095 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.568794 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vsvn\" (UniqueName: \"kubernetes.io/projected/c2acb14d-547e-4528-addc-5bb388370b04-kube-api-access-7vsvn\") pod \"manila-operator-controller-manager-58bb8d67cc-clqqr\" (UID: \"c2acb14d-547e-4528-addc-5bb388370b04\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.573095 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ghbm\" (UniqueName: \"kubernetes.io/projected/f4138dbf-cfaf-4a82-bf69-d6065584d1ba-kube-api-access-9ghbm\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-vnlb4\" (UID: \"f4138dbf-cfaf-4a82-bf69-d6065584d1ba\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.574300 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-vdv57" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.574987 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfz7j\" (UniqueName: \"kubernetes.io/projected/be284da4-49c2-4967-a810-eb5dbece93a3-kube-api-access-dfz7j\") pod \"keystone-operator-controller-manager-748dc6576f-2w275\" (UID: \"be284da4-49c2-4967-a810-eb5dbece93a3\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.584157 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.588686 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.593337 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.612506 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.624464 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.624868 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.625817 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.625970 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.635710 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.636053 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-hnjrj" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.649621 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-5k7ff" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.650162 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.651217 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vsvn\" (UniqueName: \"kubernetes.io/projected/c2acb14d-547e-4528-addc-5bb388370b04-kube-api-access-7vsvn\") pod \"manila-operator-controller-manager-58bb8d67cc-clqqr\" (UID: \"c2acb14d-547e-4528-addc-5bb388370b04\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.668850 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.670284 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.675076 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7hwh\" (UniqueName: \"kubernetes.io/projected/1c460dd6-5f3d-4eae-9436-c46ccd900674-kube-api-access-z7hwh\") pod \"octavia-operator-controller-manager-fd75fd47d-sr8nk\" (UID: \"1c460dd6-5f3d-4eae-9436-c46ccd900674\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.675164 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ghbm\" (UniqueName: \"kubernetes.io/projected/f4138dbf-cfaf-4a82-bf69-d6065584d1ba-kube-api-access-9ghbm\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-vnlb4\" (UID: \"f4138dbf-cfaf-4a82-bf69-d6065584d1ba\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.675238 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqfn7\" (UniqueName: \"kubernetes.io/projected/0eb9999f-a946-4946-83e0-6cbf7be82741-kube-api-access-qqfn7\") pod \"nova-operator-controller-manager-79556f57fc-bh7th\" (UID: \"0eb9999f-a946-4946-83e0-6cbf7be82741\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.675283 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxmvb\" (UniqueName: \"kubernetes.io/projected/66685e8a-e196-444b-9149-e7861ff2c8b5-kube-api-access-wxmvb\") pod 
\"neutron-operator-controller-manager-7c57c8bbc4-8tqbw\" (UID: \"66685e8a-e196-444b-9149-e7861ff2c8b5\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.691912 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.693133 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.700027 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-k9pj2" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.703221 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.703327 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.722712 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.724221 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.737692 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ghbm\" (UniqueName: \"kubernetes.io/projected/f4138dbf-cfaf-4a82-bf69-d6065584d1ba-kube-api-access-9ghbm\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-vnlb4\" (UID: \"f4138dbf-cfaf-4a82-bf69-d6065584d1ba\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.737878 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.749875 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-lnznw" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.785754 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8gx5\" (UniqueName: \"kubernetes.io/projected/b9f37eec-f8fc-4083-b29a-4e704c802c8a-kube-api-access-q8gx5\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d\" (UID: \"b9f37eec-f8fc-4083-b29a-4e704c802c8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.785837 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqfn7\" (UniqueName: \"kubernetes.io/projected/0eb9999f-a946-4946-83e0-6cbf7be82741-kube-api-access-qqfn7\") pod \"nova-operator-controller-manager-79556f57fc-bh7th\" (UID: \"0eb9999f-a946-4946-83e0-6cbf7be82741\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.785935 4691 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-wxmvb\" (UniqueName: \"kubernetes.io/projected/66685e8a-e196-444b-9149-e7861ff2c8b5-kube-api-access-wxmvb\") pod \"neutron-operator-controller-manager-7c57c8bbc4-8tqbw\" (UID: \"66685e8a-e196-444b-9149-e7861ff2c8b5\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.786098 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7hwh\" (UniqueName: \"kubernetes.io/projected/1c460dd6-5f3d-4eae-9436-c46ccd900674-kube-api-access-z7hwh\") pod \"octavia-operator-controller-manager-fd75fd47d-sr8nk\" (UID: \"1c460dd6-5f3d-4eae-9436-c46ccd900674\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.786136 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d\" (UID: \"b9f37eec-f8fc-4083-b29a-4e704c802c8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.786264 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc9km\" (UniqueName: \"kubernetes.io/projected/f46c7222-cbb0-457d-bb11-15d8cb855c8b-kube-api-access-gc9km\") pod \"ovn-operator-controller-manager-554b4f8994-dck8w\" (UID: \"f46c7222-cbb0-457d-bb11-15d8cb855c8b\") " pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.788515 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.791429 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.809629 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-t76qc" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.816355 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqfn7\" (UniqueName: \"kubernetes.io/projected/0eb9999f-a946-4946-83e0-6cbf7be82741-kube-api-access-qqfn7\") pod \"nova-operator-controller-manager-79556f57fc-bh7th\" (UID: \"0eb9999f-a946-4946-83e0-6cbf7be82741\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.826306 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7hwh\" (UniqueName: \"kubernetes.io/projected/1c460dd6-5f3d-4eae-9436-c46ccd900674-kube-api-access-z7hwh\") pod \"octavia-operator-controller-manager-fd75fd47d-sr8nk\" (UID: \"1c460dd6-5f3d-4eae-9436-c46ccd900674\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.829148 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.831550 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.833958 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.834880 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.835185 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-g8hmn" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.838356 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-4czcs"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.840219 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-4czcs" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.847873 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-mgfhn" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.857915 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.861847 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.869167 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-8qh9m"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.870725 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.876696 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-dfd8r" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.888654 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpv5g\" (UniqueName: \"kubernetes.io/projected/0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2-kube-api-access-lpv5g\") pod \"telemetry-operator-controller-manager-567f98c9d-sn2x6\" (UID: \"0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.889080 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d\" (UID: \"b9f37eec-f8fc-4083-b29a-4e704c802c8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.889115 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prfv4\" (UniqueName: \"kubernetes.io/projected/c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7-kube-api-access-prfv4\") pod \"swift-operator-controller-manager-6fdc4fcf86-wmpvm\" (UID: \"c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.889173 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc9km\" (UniqueName: \"kubernetes.io/projected/f46c7222-cbb0-457d-bb11-15d8cb855c8b-kube-api-access-gc9km\") pod \"ovn-operator-controller-manager-554b4f8994-dck8w\" (UID: \"f46c7222-cbb0-457d-bb11-15d8cb855c8b\") " pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.889237 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8gx5\" (UniqueName: \"kubernetes.io/projected/b9f37eec-f8fc-4083-b29a-4e704c802c8a-kube-api-access-q8gx5\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d\" (UID: \"b9f37eec-f8fc-4083-b29a-4e704c802c8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:10:43 crc kubenswrapper[4691]: E1124 08:10:43.889407 4691 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 24 08:10:43 crc kubenswrapper[4691]: E1124 
08:10:43.889575 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert podName:b9f37eec-f8fc-4083-b29a-4e704c802c8a nodeName:}" failed. No retries permitted until 2025-11-24 08:10:44.389549044 +0000 UTC m=+806.388498293 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" (UID: "b9f37eec-f8fc-4083-b29a-4e704c802c8a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.894554 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-4czcs"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.906842 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.910342 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxmvb\" (UniqueName: \"kubernetes.io/projected/66685e8a-e196-444b-9149-e7861ff2c8b5-kube-api-access-wxmvb\") pod \"neutron-operator-controller-manager-7c57c8bbc4-8tqbw\" (UID: \"66685e8a-e196-444b-9149-e7861ff2c8b5\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.911001 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-8qh9m"] Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.931969 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.967661 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8gx5\" (UniqueName: \"kubernetes.io/projected/b9f37eec-f8fc-4083-b29a-4e704c802c8a-kube-api-access-q8gx5\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d\" (UID: \"b9f37eec-f8fc-4083-b29a-4e704c802c8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.972553 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc9km\" (UniqueName: \"kubernetes.io/projected/f46c7222-cbb0-457d-bb11-15d8cb855c8b-kube-api-access-gc9km\") pod \"ovn-operator-controller-manager-554b4f8994-dck8w\" (UID: \"f46c7222-cbb0-457d-bb11-15d8cb855c8b\") " pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.997474 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47g9z\" (UniqueName: \"kubernetes.io/projected/63c87b6f-c210-4837-bde9-87436a88578f-kube-api-access-47g9z\") pod \"placement-operator-controller-manager-5db546f9d9-kp2bb\" (UID: \"63c87b6f-c210-4837-bde9-87436a88578f\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.997541 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m88mh\" (UniqueName: \"kubernetes.io/projected/345576fd-a4cd-4c76-8c81-3669a42be294-kube-api-access-m88mh\") pod \"watcher-operator-controller-manager-864885998-8qh9m\" (UID: \"345576fd-a4cd-4c76-8c81-3669a42be294\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.997579 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpv5g\" (UniqueName: \"kubernetes.io/projected/0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2-kube-api-access-lpv5g\") pod \"telemetry-operator-controller-manager-567f98c9d-sn2x6\" (UID: \"0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.997637 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prfv4\" (UniqueName: \"kubernetes.io/projected/c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7-kube-api-access-prfv4\") pod \"swift-operator-controller-manager-6fdc4fcf86-wmpvm\" (UID: \"c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.997671 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hswq7\" (UniqueName: \"kubernetes.io/projected/ccc21638-592f-4e4f-87df-f95f79a5c23e-kube-api-access-hswq7\") pod \"test-operator-controller-manager-5cb74df96-4czcs\" (UID: \"ccc21638-592f-4e4f-87df-f95f79a5c23e\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-4czcs" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.998279 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk" Nov 24 08:10:43 crc kubenswrapper[4691]: I1124 08:10:43.999044 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5"] Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.005159 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.011814 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-rd6f4" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.012039 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.012145 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.036241 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5"] Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.045628 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prfv4\" (UniqueName: \"kubernetes.io/projected/c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7-kube-api-access-prfv4\") pod \"swift-operator-controller-manager-6fdc4fcf86-wmpvm\" (UID: \"c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.056232 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpv5g\" (UniqueName: \"kubernetes.io/projected/0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2-kube-api-access-lpv5g\") pod \"telemetry-operator-controller-manager-567f98c9d-sn2x6\" (UID: \"0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.058436 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.099631 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-webhook-certs\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.099737 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.099784 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47g9z\" (UniqueName: \"kubernetes.io/projected/63c87b6f-c210-4837-bde9-87436a88578f-kube-api-access-47g9z\") pod \"placement-operator-controller-manager-5db546f9d9-kp2bb\" (UID: \"63c87b6f-c210-4837-bde9-87436a88578f\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.099808 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m88mh\" (UniqueName: \"kubernetes.io/projected/345576fd-a4cd-4c76-8c81-3669a42be294-kube-api-access-m88mh\") pod \"watcher-operator-controller-manager-864885998-8qh9m\" (UID: \"345576fd-a4cd-4c76-8c81-3669a42be294\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.099855 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hswq7\" (UniqueName: \"kubernetes.io/projected/ccc21638-592f-4e4f-87df-f95f79a5c23e-kube-api-access-hswq7\") pod \"test-operator-controller-manager-5cb74df96-4czcs\" (UID: \"ccc21638-592f-4e4f-87df-f95f79a5c23e\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-4czcs" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.099884 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhmwf\" (UniqueName: \"kubernetes.io/projected/603e76a3-8258-43ec-850b-d2c34845cd8b-kube-api-access-bhmwf\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.100981 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.122303 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw"] Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.124979 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.132126 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-jdwrz" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.134861 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw"] Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.142040 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47g9z\" (UniqueName: \"kubernetes.io/projected/63c87b6f-c210-4837-bde9-87436a88578f-kube-api-access-47g9z\") pod \"placement-operator-controller-manager-5db546f9d9-kp2bb\" (UID: \"63c87b6f-c210-4837-bde9-87436a88578f\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.164084 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hswq7\" (UniqueName: \"kubernetes.io/projected/ccc21638-592f-4e4f-87df-f95f79a5c23e-kube-api-access-hswq7\") pod \"test-operator-controller-manager-5cb74df96-4czcs\" (UID: \"ccc21638-592f-4e4f-87df-f95f79a5c23e\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-4czcs" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.186389 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-4czcs" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.191487 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m88mh\" (UniqueName: \"kubernetes.io/projected/345576fd-a4cd-4c76-8c81-3669a42be294-kube-api-access-m88mh\") pod \"watcher-operator-controller-manager-864885998-8qh9m\" (UID: \"345576fd-a4cd-4c76-8c81-3669a42be294\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.203972 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.204470 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhmwf\" (UniqueName: \"kubernetes.io/projected/603e76a3-8258-43ec-850b-d2c34845cd8b-kube-api-access-bhmwf\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.204547 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-webhook-certs\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.204616 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.204640 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksbf8\" (UniqueName: \"kubernetes.io/projected/f3bb505d-02c4-49ec-94c5-a349cb5a4468-kube-api-access-ksbf8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-65lbw\" (UID: \"f3bb505d-02c4-49ec-94c5-a349cb5a4468\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw" Nov 24 08:10:44 crc kubenswrapper[4691]: E1124 08:10:44.204874 4691 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 24 08:10:44 crc kubenswrapper[4691]: E1124 08:10:44.204972 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-webhook-certs podName:603e76a3-8258-43ec-850b-d2c34845cd8b nodeName:}" failed. No retries permitted until 2025-11-24 08:10:44.704939064 +0000 UTC m=+806.703888373 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-webhook-certs") pod "openstack-operator-controller-manager-7888ffcffd-8jst5" (UID: "603e76a3-8258-43ec-850b-d2c34845cd8b") : secret "webhook-server-cert" not found Nov 24 08:10:44 crc kubenswrapper[4691]: E1124 08:10:44.205426 4691 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 24 08:10:44 crc kubenswrapper[4691]: E1124 08:10:44.205503 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs podName:603e76a3-8258-43ec-850b-d2c34845cd8b nodeName:}" failed. No retries permitted until 2025-11-24 08:10:44.70549282 +0000 UTC m=+806.704442149 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs") pod "openstack-operator-controller-manager-7888ffcffd-8jst5" (UID: "603e76a3-8258-43ec-850b-d2c34845cd8b") : secret "metrics-server-cert" not found Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.216240 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.216312 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.307777 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhmwf\" (UniqueName: \"kubernetes.io/projected/603e76a3-8258-43ec-850b-d2c34845cd8b-kube-api-access-bhmwf\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.307809 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksbf8\" (UniqueName: \"kubernetes.io/projected/f3bb505d-02c4-49ec-94c5-a349cb5a4468-kube-api-access-ksbf8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-65lbw\" (UID: \"f3bb505d-02c4-49ec-94c5-a349cb5a4468\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.339261 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.344187 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksbf8\" (UniqueName: \"kubernetes.io/projected/f3bb505d-02c4-49ec-94c5-a349cb5a4468-kube-api-access-ksbf8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-65lbw\" (UID: \"f3bb505d-02c4-49ec-94c5-a349cb5a4468\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.358716 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.411835 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d\" (UID: \"b9f37eec-f8fc-4083-b29a-4e704c802c8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:10:44 crc kubenswrapper[4691]: E1124 08:10:44.412123 4691 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 24 08:10:44 crc kubenswrapper[4691]: E1124 08:10:44.412202 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert podName:b9f37eec-f8fc-4083-b29a-4e704c802c8a nodeName:}" failed. No retries permitted until 2025-11-24 08:10:45.412178209 +0000 UTC m=+807.411127458 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" (UID: "b9f37eec-f8fc-4083-b29a-4e704c802c8a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.575693 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.702237 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.721979 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-webhook-certs\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.722053 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:44 crc kubenswrapper[4691]: E1124 08:10:44.722267 4691 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 24 08:10:44 crc kubenswrapper[4691]: E1124 08:10:44.722386 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-webhook-certs podName:603e76a3-8258-43ec-850b-d2c34845cd8b nodeName:}" failed. No retries permitted until 2025-11-24 08:10:45.722352257 +0000 UTC m=+807.721301506 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-webhook-certs") pod "openstack-operator-controller-manager-7888ffcffd-8jst5" (UID: "603e76a3-8258-43ec-850b-d2c34845cd8b") : secret "webhook-server-cert" not found Nov 24 08:10:44 crc kubenswrapper[4691]: E1124 08:10:44.722498 4691 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 24 08:10:44 crc kubenswrapper[4691]: E1124 08:10:44.722589 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs podName:603e76a3-8258-43ec-850b-d2c34845cd8b nodeName:}" failed. No retries permitted until 2025-11-24 08:10:45.722567023 +0000 UTC m=+807.721516272 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs") pod "openstack-operator-controller-manager-7888ffcffd-8jst5" (UID: "603e76a3-8258-43ec-850b-d2c34845cd8b") : secret "metrics-server-cert" not found Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.765392 4691 generic.go:334] "Generic (PLEG): container finished" podID="c5492776-03c1-422f-8097-e69c5fbf459b" containerID="7089cb8f3da2e0988ca4e9f8a67ab76a25791ace26453b05003cd9f52129ef7a" exitCode=0 Nov 24 08:10:44 crc kubenswrapper[4691]: I1124 08:10:44.765434 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4t6" event={"ID":"c5492776-03c1-422f-8097-e69c5fbf459b","Type":"ContainerDied","Data":"7089cb8f3da2e0988ca4e9f8a67ab76a25791ace26453b05003cd9f52129ef7a"} Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.080950 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-cql69"] Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.326054 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bb2l8" podUID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerName="registry-server" probeResult="failure" output=< Nov 24 08:10:45 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 08:10:45 crc kubenswrapper[4691]: > Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.382210 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g"] Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.396651 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs"] Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.408644 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4"] Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.443605 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d\" (UID: \"b9f37eec-f8fc-4083-b29a-4e704c802c8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:10:45 crc kubenswrapper[4691]: E1124 08:10:45.443802 4691 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 24 08:10:45 crc kubenswrapper[4691]: E1124 08:10:45.443858 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert podName:b9f37eec-f8fc-4083-b29a-4e704c802c8a nodeName:}" failed. No retries permitted until 2025-11-24 08:10:47.443841904 +0000 UTC m=+809.442791153 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" (UID: "b9f37eec-f8fc-4083-b29a-4e704c802c8a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.750238 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.750778 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-webhook-certs\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:45 crc kubenswrapper[4691]: E1124 08:10:45.752743 4691 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 24 08:10:45 crc kubenswrapper[4691]: E1124 08:10:45.752838 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs podName:603e76a3-8258-43ec-850b-d2c34845cd8b nodeName:}" failed. No retries permitted until 2025-11-24 08:10:47.752814818 +0000 UTC m=+809.751764077 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs") pod "openstack-operator-controller-manager-7888ffcffd-8jst5" (UID: "603e76a3-8258-43ec-850b-d2c34845cd8b") : secret "metrics-server-cert" not found Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.770397 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-webhook-certs\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.777574 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs" event={"ID":"22fec998-136d-4bc0-9db1-1e4ac6e1107c","Type":"ContainerStarted","Data":"23cd17bd39369a054e4cb54d2823101451e3903215268eb4740bdb631c000655"} Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.779050 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-cql69" event={"ID":"df3746c8-ec8b-406e-b2f5-7bd93dd46646","Type":"ContainerStarted","Data":"31748448ad12ece95de432090b0ff7d7249efb592aa6e66fd275e65b0698a9f6"} Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.780083 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4" event={"ID":"132ed997-05f1-4484-a11a-3e282b0e889b","Type":"ContainerStarted","Data":"bca9d45009e09a4371e18abc8cc14a16cdc8c36021a532f331ca7f7f6ec2e98b"} Nov 24 
08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.781007 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g" event={"ID":"f8a9119f-fc7e-4bb6-89da-91f7655c633d","Type":"ContainerStarted","Data":"cf18e9dc4966072876c9d2edd0d668a860a8ee4248167d9cb674fd934fb27712"} Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.783433 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4t6" event={"ID":"c5492776-03c1-422f-8097-e69c5fbf459b","Type":"ContainerStarted","Data":"1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761"} Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.814533 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-st4t6" podStartSLOduration=3.241476958 podStartE2EDuration="6.814513005s" podCreationTimestamp="2025-11-24 08:10:39 +0000 UTC" firstStartedPulling="2025-11-24 08:10:41.630894145 +0000 UTC m=+803.629843394" lastFinishedPulling="2025-11-24 08:10:45.203930192 +0000 UTC m=+807.202879441" observedRunningTime="2025-11-24 08:10:45.81362913 +0000 UTC m=+807.812578389" watchObservedRunningTime="2025-11-24 08:10:45.814513005 +0000 UTC m=+807.813462254" Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.860984 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw"] Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.867264 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4"] Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.879461 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w"] Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.888060 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x"] Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.932973 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk"] Nov 24 08:10:45 crc kubenswrapper[4691]: I1124 08:10:45.987199 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk"] Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.004620 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk"] Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.010312 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr"] Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.229142 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v"] Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.283156 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275"] Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.293953 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6"] Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.312875 4691 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-4czcs"] Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.317635 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th"] Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.322927 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw"] Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.330787 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-8qh9m"] Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.335923 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb"] Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.340366 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm"] Nov 24 08:10:46 crc kubenswrapper[4691]: W1124 08:10:46.362123 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3bb505d_02c4_49ec_94c5_a349cb5a4468.slice/crio-ed25a16e83c2b4cd8e2161e198a402f8f8fd4fb74a023e021ca88da9b25da401 WatchSource:0}: Error finding container ed25a16e83c2b4cd8e2161e198a402f8f8fd4fb74a023e021ca88da9b25da401: Status 404 returned error can't find the container with id ed25a16e83c2b4cd8e2161e198a402f8f8fd4fb74a023e021ca88da9b25da401 Nov 24 08:10:46 crc kubenswrapper[4691]: W1124 08:10:46.363091 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63c87b6f_c210_4837_bde9_87436a88578f.slice/crio-a06dcd0602f06d7b4180da2c848b01e5730fa68a68a6bca0aa5a4100b4b359b3 WatchSource:0}: Error finding container a06dcd0602f06d7b4180da2c848b01e5730fa68a68a6bca0aa5a4100b4b359b3: Status 404 returned error can't find the container with id a06dcd0602f06d7b4180da2c848b01e5730fa68a68a6bca0aa5a4100b4b359b3 Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.365894 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ksbf8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-65lbw_openstack-operators(f3bb505d-02c4-49ec-94c5-a349cb5a4468): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.366424 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-47g9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5db546f9d9-kp2bb_openstack-operators(63c87b6f-c210-4837-bde9-87436a88578f): 
ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 08:10:46 crc kubenswrapper[4691]: W1124 08:10:46.369155 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod345576fd_a4cd_4c76_8c81_3669a42be294.slice/crio-8f12bbde6fd3ef0f30ec3dafffcc6288afeb0b9ee2d2a13b2d3f707aa45de05c WatchSource:0}: Error finding container 8f12bbde6fd3ef0f30ec3dafffcc6288afeb0b9ee2d2a13b2d3f707aa45de05c: Status 404 returned error can't find the container with id 8f12bbde6fd3ef0f30ec3dafffcc6288afeb0b9ee2d2a13b2d3f707aa45de05c Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.369329 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw" podUID="f3bb505d-02c4-49ec-94c5-a349cb5a4468" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.370519 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-47g9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5db546f9d9-kp2bb_openstack-operators(63c87b6f-c210-4837-bde9-87436a88578f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.372053 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" podUID="63c87b6f-c210-4837-bde9-87436a88578f" Nov 24 08:10:46 crc kubenswrapper[4691]: W1124 08:10:46.372090 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0ed3c5b9_6275_4aa7_9f4c_a5e7ae6404f2.slice/crio-621ce0db1fac0f94fd6ee5c0d3022ef517f8b0ddafa7d4e0a306fc6252840d7a WatchSource:0}: Error finding container 621ce0db1fac0f94fd6ee5c0d3022ef517f8b0ddafa7d4e0a306fc6252840d7a: Status 404 returned error can't find the container 
with id 621ce0db1fac0f94fd6ee5c0d3022ef517f8b0ddafa7d4e0a306fc6252840d7a Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.372685 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m88mh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-8qh9m_openstack-operators(345576fd-a4cd-4c76-8c81-3669a42be294): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.375283 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m88mh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-8qh9m_openstack-operators(345576fd-a4cd-4c76-8c81-3669a42be294): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 08:10:46 crc kubenswrapper[4691]: W1124 08:10:46.375820 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7e06db1_dbe0_48c4_ba25_ef962e6cd3d7.slice/crio-8ffa54c36e278db2b1ecae8c10cc402d682ffa5e9c8bad3e75a127373c29d45a WatchSource:0}: Error finding container 8ffa54c36e278db2b1ecae8c10cc402d682ffa5e9c8bad3e75a127373c29d45a: Status 404 returned error can't find the container with id 8ffa54c36e278db2b1ecae8c10cc402d682ffa5e9c8bad3e75a127373c29d45a Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.377050 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" podUID="345576fd-a4cd-4c76-8c81-3669a42be294" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.379167 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lpv5g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-567f98c9d-sn2x6_openstack-operators(0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.380875 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-prfv4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-wmpvm_openstack-operators(c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.381454 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qqfn7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-bh7th_openstack-operators(0eb9999f-a946-4946-83e0-6cbf7be82741): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 08:10:46 crc 
kubenswrapper[4691]: E1124 08:10:46.382080 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lpv5g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-567f98c9d-sn2x6_openstack-operators(0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.382545 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-prfv4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-wmpvm_openstack-operators(c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.383181 4691 pod_workers.go:1301] "Error syncing pod, skipping" 
err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" podUID="0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.384273 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" podUID="c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.386510 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qqfn7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-bh7th_openstack-operators(0eb9999f-a946-4946-83e0-6cbf7be82741): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.389728 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" podUID="0eb9999f-a946-4946-83e0-6cbf7be82741" Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.798848 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" event={"ID":"7e82629b-ee44-488b-bdd3-58f078070f7e","Type":"ContainerStarted","Data":"4f04edc56da9613d64632e0d6c115292f3fdfbe64da2032da5dc4a0d5ea5032f"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.803007 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw" 
event={"ID":"66685e8a-e196-444b-9149-e7861ff2c8b5","Type":"ContainerStarted","Data":"8fc7f01219d6d679a4491d425550f396f197392a5356109412345068f69094e6"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.806307 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275" event={"ID":"be284da4-49c2-4967-a810-eb5dbece93a3","Type":"ContainerStarted","Data":"5e689a6b3df9d5e8fcbe6dd761e06f2618fa93e924f52e30e91d609a01091867"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.808128 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" event={"ID":"0eb9999f-a946-4946-83e0-6cbf7be82741","Type":"ContainerStarted","Data":"fb13d6ec1a411bb6bc68b586e267cc8d58d9dbd1fe507546980a2481905aacd6"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.812002 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" event={"ID":"c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7","Type":"ContainerStarted","Data":"8ffa54c36e278db2b1ecae8c10cc402d682ffa5e9c8bad3e75a127373c29d45a"} Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.814805 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" podUID="0eb9999f-a946-4946-83e0-6cbf7be82741" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.816400 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" podUID="c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7" Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.816768 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" event={"ID":"63c87b6f-c210-4837-bde9-87436a88578f","Type":"ContainerStarted","Data":"a06dcd0602f06d7b4180da2c848b01e5730fa68a68a6bca0aa5a4100b4b359b3"} Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.820228 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" podUID="63c87b6f-c210-4837-bde9-87436a88578f" Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.837217 4691 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v" event={"ID":"bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e","Type":"ContainerStarted","Data":"d2e1ed2665e18172ae6887223a4ef63a46c069a041b86ea6e2d879e2953bba0c"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.842766 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" event={"ID":"345576fd-a4cd-4c76-8c81-3669a42be294","Type":"ContainerStarted","Data":"8f12bbde6fd3ef0f30ec3dafffcc6288afeb0b9ee2d2a13b2d3f707aa45de05c"} Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.850732 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" podUID="345576fd-a4cd-4c76-8c81-3669a42be294" Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.854795 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-4czcs" event={"ID":"ccc21638-592f-4e4f-87df-f95f79a5c23e","Type":"ContainerStarted","Data":"a2dcd4f1b3fbd6cfbcffe020a8061914aca5795448aa694e81f74d900c5eeeee"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.865473 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w" event={"ID":"f46c7222-cbb0-457d-bb11-15d8cb855c8b","Type":"ContainerStarted","Data":"7294eef3620e4ff91a6a84d4c94f3fa3ff78ddaa769ff53cddd6731dc25b5fd7"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.889056 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4" event={"ID":"f4138dbf-cfaf-4a82-bf69-d6065584d1ba","Type":"ContainerStarted","Data":"a008c8c5276dfd11d14f7c6b0baac152ee0373536b8c5c46bfc4c4078b51f788"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.906243 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk" event={"ID":"1c460dd6-5f3d-4eae-9436-c46ccd900674","Type":"ContainerStarted","Data":"e875397256899299d0c9eee146430b9d3568457f4dcb5598507a791c5787b9d1"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.911568 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw" event={"ID":"f3bb505d-02c4-49ec-94c5-a349cb5a4468","Type":"ContainerStarted","Data":"ed25a16e83c2b4cd8e2161e198a402f8f8fd4fb74a023e021ca88da9b25da401"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.913602 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" event={"ID":"0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2","Type":"ContainerStarted","Data":"621ce0db1fac0f94fd6ee5c0d3022ef517f8b0ddafa7d4e0a306fc6252840d7a"} Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.915511 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw" podUID="f3bb505d-02c4-49ec-94c5-a349cb5a4468" Nov 24 08:10:46 crc kubenswrapper[4691]: E1124 08:10:46.916848 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" podUID="0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2" Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.917002 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr" event={"ID":"c2acb14d-547e-4528-addc-5bb388370b04","Type":"ContainerStarted","Data":"67b55845907741131e96de9dfcd626b0f1688ff42554be9e3ae60f0d116db5a6"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.924875 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk" event={"ID":"39df322c-3527-4b0d-a719-4ecbfa944a56","Type":"ContainerStarted","Data":"779cbcc8f8c653accaed49813a09054b1bd59ea7ba73e2c090f82a8e710d309e"} Nov 24 08:10:46 crc kubenswrapper[4691]: I1124 08:10:46.929978 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x" event={"ID":"24f62db2-c526-493e-a703-43a661ea0228","Type":"ContainerStarted","Data":"5a28a1db4e8214507242b542792130a101619abff6574661991945e9ea06301a"} Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.491761 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d\" (UID: \"b9f37eec-f8fc-4083-b29a-4e704c802c8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.499210 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b9f37eec-f8fc-4083-b29a-4e704c802c8a-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d\" (UID: \"b9f37eec-f8fc-4083-b29a-4e704c802c8a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.644207 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.673156 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-psr85"] Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.679227 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-psr85" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.687046 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-psr85"] Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.805428 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.805533 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-utilities\") pod \"community-operators-psr85\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") " pod="openshift-marketplace/community-operators-psr85" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.805561 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7tjh\" (UniqueName: \"kubernetes.io/projected/81f22002-1443-4393-8d6d-22db227fc480-kube-api-access-d7tjh\") pod \"community-operators-psr85\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") " pod="openshift-marketplace/community-operators-psr85" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.805636 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-catalog-content\") pod \"community-operators-psr85\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") " pod="openshift-marketplace/community-operators-psr85" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.814252 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/603e76a3-8258-43ec-850b-d2c34845cd8b-metrics-certs\") pod \"openstack-operator-controller-manager-7888ffcffd-8jst5\" (UID: \"603e76a3-8258-43ec-850b-d2c34845cd8b\") " pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.907667 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7tjh\" (UniqueName: \"kubernetes.io/projected/81f22002-1443-4393-8d6d-22db227fc480-kube-api-access-d7tjh\") pod \"community-operators-psr85\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") " pod="openshift-marketplace/community-operators-psr85" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.907844 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-catalog-content\") pod \"community-operators-psr85\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") " pod="openshift-marketplace/community-operators-psr85" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.907958 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-utilities\") pod \"community-operators-psr85\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") " 
pod="openshift-marketplace/community-operators-psr85" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.908785 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-utilities\") pod \"community-operators-psr85\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") " pod="openshift-marketplace/community-operators-psr85" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.909172 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-catalog-content\") pod \"community-operators-psr85\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") " pod="openshift-marketplace/community-operators-psr85" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.929322 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7tjh\" (UniqueName: \"kubernetes.io/projected/81f22002-1443-4393-8d6d-22db227fc480-kube-api-access-d7tjh\") pod \"community-operators-psr85\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") " pod="openshift-marketplace/community-operators-psr85" Nov 24 08:10:47 crc kubenswrapper[4691]: I1124 08:10:47.942787 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:10:47 crc kubenswrapper[4691]: E1124 08:10:47.971303 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw" podUID="f3bb505d-02c4-49ec-94c5-a349cb5a4468" Nov 24 08:10:47 crc kubenswrapper[4691]: E1124 08:10:47.971834 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" podUID="0eb9999f-a946-4946-83e0-6cbf7be82741" Nov 24 08:10:47 crc kubenswrapper[4691]: E1124 08:10:47.971942 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" podUID="63c87b6f-c210-4837-bde9-87436a88578f" Nov 24 08:10:47 crc kubenswrapper[4691]: E1124 08:10:47.983404 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" podUID="345576fd-a4cd-4c76-8c81-3669a42be294" Nov 24 08:10:47 crc kubenswrapper[4691]: E1124 08:10:47.983586 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" podUID="c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7" Nov 24 08:10:47 crc kubenswrapper[4691]: E1124 08:10:47.983721 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" podUID="0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2" Nov 24 08:10:48 crc kubenswrapper[4691]: I1124 08:10:48.026641 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-psr85" Nov 24 08:10:50 crc kubenswrapper[4691]: I1124 08:10:50.194992 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:50 crc kubenswrapper[4691]: I1124 08:10:50.197161 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:50 crc kubenswrapper[4691]: I1124 08:10:50.261204 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:51 crc kubenswrapper[4691]: I1124 08:10:51.030938 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:10:51 crc kubenswrapper[4691]: I1124 08:10:51.661533 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-st4t6"] Nov 24 08:10:53 crc kubenswrapper[4691]: I1124 08:10:53.007525 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-st4t6" podUID="c5492776-03c1-422f-8097-e69c5fbf459b" containerName="registry-server" containerID="cri-o://1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761" gracePeriod=2 Nov 24 08:10:54 crc kubenswrapper[4691]: I1124 08:10:54.014306 4691 generic.go:334] "Generic (PLEG): container finished" podID="c5492776-03c1-422f-8097-e69c5fbf459b" containerID="1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761" exitCode=0 Nov 24 08:10:54 crc kubenswrapper[4691]: I1124 08:10:54.014396 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4t6" event={"ID":"c5492776-03c1-422f-8097-e69c5fbf459b","Type":"ContainerDied","Data":"1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761"} Nov 24 08:10:54 crc kubenswrapper[4691]: I1124 08:10:54.269191 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:54 crc kubenswrapper[4691]: I1124 08:10:54.315536 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:10:55 crc kubenswrapper[4691]: I1124 08:10:55.263571 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bb2l8"] Nov 24 08:10:56 crc kubenswrapper[4691]: I1124 08:10:56.028842 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bb2l8" podUID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerName="registry-server" containerID="cri-o://0eb39cdc698157e16739daa225e56d9787c696c5655714a544c8719dc1bd64d5" gracePeriod=2 Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.038473 4691 generic.go:334] "Generic (PLEG): container finished" podID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerID="0eb39cdc698157e16739daa225e56d9787c696c5655714a544c8719dc1bd64d5" exitCode=0 Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.038543 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bb2l8" event={"ID":"29cbcb95-d722-413b-893c-f04bdb5ea301","Type":"ContainerDied","Data":"0eb39cdc698157e16739daa225e56d9787c696c5655714a544c8719dc1bd64d5"} Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.672929 4691 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-marketplace/redhat-marketplace-m7pj8"] Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.674866 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.688178 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m7pj8"] Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.761832 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-catalog-content\") pod \"redhat-marketplace-m7pj8\" (UID: \"c145a1b2-551b-4166-8f18-9e0612d384de\") " pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.761928 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-utilities\") pod \"redhat-marketplace-m7pj8\" (UID: \"c145a1b2-551b-4166-8f18-9e0612d384de\") " pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.761961 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tbgg\" (UniqueName: \"kubernetes.io/projected/c145a1b2-551b-4166-8f18-9e0612d384de-kube-api-access-2tbgg\") pod \"redhat-marketplace-m7pj8\" (UID: \"c145a1b2-551b-4166-8f18-9e0612d384de\") " pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.863649 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-catalog-content\") pod \"redhat-marketplace-m7pj8\" (UID: \"c145a1b2-551b-4166-8f18-9e0612d384de\") " pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.864407 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-catalog-content\") pod \"redhat-marketplace-m7pj8\" (UID: \"c145a1b2-551b-4166-8f18-9e0612d384de\") " pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.864351 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-utilities\") pod \"redhat-marketplace-m7pj8\" (UID: \"c145a1b2-551b-4166-8f18-9e0612d384de\") " pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.864794 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tbgg\" (UniqueName: \"kubernetes.io/projected/c145a1b2-551b-4166-8f18-9e0612d384de-kube-api-access-2tbgg\") pod \"redhat-marketplace-m7pj8\" (UID: \"c145a1b2-551b-4166-8f18-9e0612d384de\") " pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.864801 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-utilities\") pod \"redhat-marketplace-m7pj8\" (UID: 
\"c145a1b2-551b-4166-8f18-9e0612d384de\") " pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.887152 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tbgg\" (UniqueName: \"kubernetes.io/projected/c145a1b2-551b-4166-8f18-9e0612d384de-kube-api-access-2tbgg\") pod \"redhat-marketplace-m7pj8\" (UID: \"c145a1b2-551b-4166-8f18-9e0612d384de\") " pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:10:57 crc kubenswrapper[4691]: I1124 08:10:57.993379 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:11:00 crc kubenswrapper[4691]: E1124 08:11:00.196224 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761 is running failed: container process not found" containerID="1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761" cmd=["grpc_health_probe","-addr=:50051"] Nov 24 08:11:00 crc kubenswrapper[4691]: E1124 08:11:00.197274 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761 is running failed: container process not found" containerID="1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761" cmd=["grpc_health_probe","-addr=:50051"] Nov 24 08:11:00 crc kubenswrapper[4691]: E1124 08:11:00.197658 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761 is running failed: container process not found" containerID="1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761" cmd=["grpc_health_probe","-addr=:50051"] Nov 24 08:11:00 crc kubenswrapper[4691]: E1124 08:11:00.197687 4691 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-st4t6" podUID="c5492776-03c1-422f-8097-e69c5fbf459b" containerName="registry-server" Nov 24 08:11:00 crc kubenswrapper[4691]: E1124 08:11:00.503576 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:848f4c43c6bdd4e33e3ce1d147a85b9b6a6124a150bd5155dce421ef539259e9" Nov 24 08:11:00 crc kubenswrapper[4691]: E1124 08:11:00.503867 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:848f4c43c6bdd4e33e3ce1d147a85b9b6a6124a150bd5155dce421ef539259e9,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nxldp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-68c9694994-jctfk_openstack-operators(39df322c-3527-4b0d-a719-4ecbfa944a56): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 08:11:01 crc kubenswrapper[4691]: E1124 08:11:01.235740 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991" Nov 24 08:11:01 crc kubenswrapper[4691]: E1124 08:11:01.236002 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8lbqp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-68b95954c9-f7g9v_openstack-operators(bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 08:11:01 crc kubenswrapper[4691]: E1124 08:11:01.875928 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a" Nov 24 08:11:01 crc kubenswrapper[4691]: E1124 08:11:01.876125 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dfz7j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-748dc6576f-2w275_openstack-operators(be284da4-49c2-4967-a810-eb5dbece93a3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 08:11:02 crc kubenswrapper[4691]: E1124 08:11:02.000635 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/openstack-k8s-operators/ovn-operator:1ea648c9c2eb0f856abec8ea9f09c9985045c573" Nov 24 08:11:02 crc kubenswrapper[4691]: E1124 08:11:02.001007 4691 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/openstack-k8s-operators/ovn-operator:1ea648c9c2eb0f856abec8ea9f09c9985045c573" Nov 24 08:11:02 crc kubenswrapper[4691]: E1124 08:11:02.001176 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.132:5001/openstack-k8s-operators/ovn-operator:1ea648c9c2eb0f856abec8ea9f09c9985045c573,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gc9km,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-554b4f8994-dck8w_openstack-operators(f46c7222-cbb0-457d-bb11-15d8cb855c8b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 08:11:02 crc kubenswrapper[4691]: I1124 08:11:02.695854 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d"] Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.471629 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.480227 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.575210 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-catalog-content\") pod \"29cbcb95-d722-413b-893c-f04bdb5ea301\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.575245 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-utilities\") pod \"29cbcb95-d722-413b-893c-f04bdb5ea301\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.575359 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-utilities\") pod \"c5492776-03c1-422f-8097-e69c5fbf459b\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.575420 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xs2nv\" (UniqueName: \"kubernetes.io/projected/c5492776-03c1-422f-8097-e69c5fbf459b-kube-api-access-xs2nv\") pod \"c5492776-03c1-422f-8097-e69c5fbf459b\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.575466 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-catalog-content\") pod \"c5492776-03c1-422f-8097-e69c5fbf459b\" (UID: \"c5492776-03c1-422f-8097-e69c5fbf459b\") " Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.575518 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-nkrxc\" (UniqueName: \"kubernetes.io/projected/29cbcb95-d722-413b-893c-f04bdb5ea301-kube-api-access-nkrxc\") pod \"29cbcb95-d722-413b-893c-f04bdb5ea301\" (UID: \"29cbcb95-d722-413b-893c-f04bdb5ea301\") " Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.576507 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-utilities" (OuterVolumeSpecName: "utilities") pod "c5492776-03c1-422f-8097-e69c5fbf459b" (UID: "c5492776-03c1-422f-8097-e69c5fbf459b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.576515 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-utilities" (OuterVolumeSpecName: "utilities") pod "29cbcb95-d722-413b-893c-f04bdb5ea301" (UID: "29cbcb95-d722-413b-893c-f04bdb5ea301"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.583128 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29cbcb95-d722-413b-893c-f04bdb5ea301-kube-api-access-nkrxc" (OuterVolumeSpecName: "kube-api-access-nkrxc") pod "29cbcb95-d722-413b-893c-f04bdb5ea301" (UID: "29cbcb95-d722-413b-893c-f04bdb5ea301"). InnerVolumeSpecName "kube-api-access-nkrxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.597745 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5492776-03c1-422f-8097-e69c5fbf459b-kube-api-access-xs2nv" (OuterVolumeSpecName: "kube-api-access-xs2nv") pod "c5492776-03c1-422f-8097-e69c5fbf459b" (UID: "c5492776-03c1-422f-8097-e69c5fbf459b"). InnerVolumeSpecName "kube-api-access-xs2nv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.629680 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c5492776-03c1-422f-8097-e69c5fbf459b" (UID: "c5492776-03c1-422f-8097-e69c5fbf459b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.677275 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkrxc\" (UniqueName: \"kubernetes.io/projected/29cbcb95-d722-413b-893c-f04bdb5ea301-kube-api-access-nkrxc\") on node \"crc\" DevicePath \"\"" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.677316 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.677328 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.677337 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xs2nv\" (UniqueName: \"kubernetes.io/projected/c5492776-03c1-422f-8097-e69c5fbf459b-kube-api-access-xs2nv\") on node \"crc\" DevicePath \"\"" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.677348 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5492776-03c1-422f-8097-e69c5fbf459b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.679491 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29cbcb95-d722-413b-893c-f04bdb5ea301" (UID: "29cbcb95-d722-413b-893c-f04bdb5ea301"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.779439 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29cbcb95-d722-413b-893c-f04bdb5ea301-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:11:03 crc kubenswrapper[4691]: I1124 08:11:03.851790 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-psr85"] Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.100822 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" event={"ID":"b9f37eec-f8fc-4083-b29a-4e704c802c8a","Type":"ContainerStarted","Data":"9637448c5eb814f2067f1a120fabc1dc106dd16d45ed58d77885efc6c1b591c9"} Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.106305 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-st4t6" Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.106311 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-st4t6" event={"ID":"c5492776-03c1-422f-8097-e69c5fbf459b","Type":"ContainerDied","Data":"b75e45a11993753ae0c8af0603f175e7d4cfef6ed51c4cba054cbea746c2edd6"} Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.106755 4691 scope.go:117] "RemoveContainer" containerID="1502cd567dc6711d9838553e0cea26e64fcbcee311c4f7c530f3f7f3aeb5e761" Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.109939 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bb2l8" event={"ID":"29cbcb95-d722-413b-893c-f04bdb5ea301","Type":"ContainerDied","Data":"fc6f59b0006f3ed0c834a9fede06f174f2f11e9cd58e0391e5c3213f50ad652f"} Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.110066 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bb2l8" Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.140147 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-st4t6"] Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.144718 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-st4t6"] Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.164989 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bb2l8"] Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.169070 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bb2l8"] Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.781707 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29cbcb95-d722-413b-893c-f04bdb5ea301" path="/var/lib/kubelet/pods/29cbcb95-d722-413b-893c-f04bdb5ea301/volumes" Nov 24 08:11:04 crc kubenswrapper[4691]: I1124 08:11:04.783213 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5492776-03c1-422f-8097-e69c5fbf459b" path="/var/lib/kubelet/pods/c5492776-03c1-422f-8097-e69c5fbf459b/volumes" Nov 24 08:11:05 crc kubenswrapper[4691]: I1124 08:11:05.239704 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5"] Nov 24 08:11:05 crc kubenswrapper[4691]: W1124 08:11:05.378728 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81f22002_1443_4393_8d6d_22db227fc480.slice/crio-cca9693a71910b1d30d5c43fc1a449c78a981dd5ffca8bd4ba37a91d8ce1416e WatchSource:0}: Error finding container cca9693a71910b1d30d5c43fc1a449c78a981dd5ffca8bd4ba37a91d8ce1416e: Status 404 returned error can't find the container with id cca9693a71910b1d30d5c43fc1a449c78a981dd5ffca8bd4ba37a91d8ce1416e Nov 24 08:11:05 crc kubenswrapper[4691]: I1124 08:11:05.766379 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m7pj8"] Nov 24 08:11:06 crc kubenswrapper[4691]: I1124 08:11:06.134765 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs" event={"ID":"22fec998-136d-4bc0-9db1-1e4ac6e1107c","Type":"ContainerStarted","Data":"d00e57de1a7e53520e73016417621f61b5362056c9308cc50b14d60511e3be9a"} Nov 24 
08:11:06 crc kubenswrapper[4691]: I1124 08:11:06.136680 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" event={"ID":"603e76a3-8258-43ec-850b-d2c34845cd8b","Type":"ContainerStarted","Data":"666c447b6a0df22baa2c5780a21c9f1a3832ef169708aef29be28f78395d6144"} Nov 24 08:11:06 crc kubenswrapper[4691]: I1124 08:11:06.138203 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psr85" event={"ID":"81f22002-1443-4393-8d6d-22db227fc480","Type":"ContainerStarted","Data":"cca9693a71910b1d30d5c43fc1a449c78a981dd5ffca8bd4ba37a91d8ce1416e"} Nov 24 08:11:06 crc kubenswrapper[4691]: W1124 08:11:06.359666 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc145a1b2_551b_4166_8f18_9e0612d384de.slice/crio-ba6b6e33b7ad8dae91c77df824e54e566ee7d35cbcb7dc81e753525351e854b7 WatchSource:0}: Error finding container ba6b6e33b7ad8dae91c77df824e54e566ee7d35cbcb7dc81e753525351e854b7: Status 404 returned error can't find the container with id ba6b6e33b7ad8dae91c77df824e54e566ee7d35cbcb7dc81e753525351e854b7 Nov 24 08:11:07 crc kubenswrapper[4691]: I1124 08:11:07.149916 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m7pj8" event={"ID":"c145a1b2-551b-4166-8f18-9e0612d384de","Type":"ContainerStarted","Data":"ba6b6e33b7ad8dae91c77df824e54e566ee7d35cbcb7dc81e753525351e854b7"} Nov 24 08:11:07 crc kubenswrapper[4691]: I1124 08:11:07.473693 4691 scope.go:117] "RemoveContainer" containerID="7089cb8f3da2e0988ca4e9f8a67ab76a25791ace26453b05003cd9f52129ef7a" Nov 24 08:11:08 crc kubenswrapper[4691]: I1124 08:11:08.165055 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x" event={"ID":"24f62db2-c526-493e-a703-43a661ea0228","Type":"ContainerStarted","Data":"a5d40acd07deaa1e9c69fc5b359aad8245375bcc06f420f4aec707d8d8231fa7"} Nov 24 08:11:08 crc kubenswrapper[4691]: I1124 08:11:08.953317 4691 scope.go:117] "RemoveContainer" containerID="a8deeb1991762b71afa08ebe2318b831294eee56d96dfcf5ea05d955fee0e9cd" Nov 24 08:11:09 crc kubenswrapper[4691]: I1124 08:11:09.174274 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-cql69" event={"ID":"df3746c8-ec8b-406e-b2f5-7bd93dd46646","Type":"ContainerStarted","Data":"3c6c1602352111a781a9b32d68df8a938782af31594848e66325db044f20075f"} Nov 24 08:11:09 crc kubenswrapper[4691]: I1124 08:11:09.179341 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk" event={"ID":"1c460dd6-5f3d-4eae-9436-c46ccd900674","Type":"ContainerStarted","Data":"95884687014c6da99a90c84629726267abbd6b296822784994376552c4df2a8c"} Nov 24 08:11:10 crc kubenswrapper[4691]: I1124 08:11:10.191121 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw" event={"ID":"66685e8a-e196-444b-9149-e7861ff2c8b5","Type":"ContainerStarted","Data":"9a15f9235d445fbe00b44a491bc49cd8bd212c8ad736c6f7c58ecb4b511e79d0"} Nov 24 08:11:10 crc kubenswrapper[4691]: I1124 08:11:10.198224 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4" 
event={"ID":"132ed997-05f1-4484-a11a-3e282b0e889b","Type":"ContainerStarted","Data":"7a3ce98a85c3cdf687e691d530695df09ad1e624acca4a9571f297005287552c"} Nov 24 08:11:10 crc kubenswrapper[4691]: I1124 08:11:10.200701 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" event={"ID":"7e82629b-ee44-488b-bdd3-58f078070f7e","Type":"ContainerStarted","Data":"f93cbbc2e9eb98da3fbc9b25ed4919025b5037082aac3431ad5f9d2b321a9b2d"} Nov 24 08:11:10 crc kubenswrapper[4691]: I1124 08:11:10.202291 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-4czcs" event={"ID":"ccc21638-592f-4e4f-87df-f95f79a5c23e","Type":"ContainerStarted","Data":"249d4bb83e4ca4ef35295059f79eb5cdb81ed7f1c6f604637e6ed77429e856bc"} Nov 24 08:11:10 crc kubenswrapper[4691]: I1124 08:11:10.203525 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr" event={"ID":"c2acb14d-547e-4528-addc-5bb388370b04","Type":"ContainerStarted","Data":"e2aa8b9ac2db42ce3eef5d868ad25f341ad188dcb03b501ee9056fcbc1d8baa3"} Nov 24 08:11:10 crc kubenswrapper[4691]: I1124 08:11:10.209484 4691 scope.go:117] "RemoveContainer" containerID="0eb39cdc698157e16739daa225e56d9787c696c5655714a544c8719dc1bd64d5" Nov 24 08:11:10 crc kubenswrapper[4691]: I1124 08:11:10.414108 4691 scope.go:117] "RemoveContainer" containerID="19e90b80868244715aee6f90bf45117ab1b221de14ccfd13240b9eb9fb36d3d2" Nov 24 08:11:11 crc kubenswrapper[4691]: I1124 08:11:11.024009 4691 scope.go:117] "RemoveContainer" containerID="f5f3da8ad5456d8ce68bb72db1ba882f4fc6fe80ea58175bf692c1bd7f4bcecd" Nov 24 08:11:11 crc kubenswrapper[4691]: I1124 08:11:11.218588 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4" event={"ID":"f4138dbf-cfaf-4a82-bf69-d6065584d1ba","Type":"ContainerStarted","Data":"1c71b1d96ec72dcfdce044086c4a073b54d22a9e56a8ae0289549fbd658bcf09"} Nov 24 08:11:11 crc kubenswrapper[4691]: I1124 08:11:11.221015 4691 generic.go:334] "Generic (PLEG): container finished" podID="81f22002-1443-4393-8d6d-22db227fc480" containerID="d39ee9682f21753d63314af05359e82362fca0cb84e788d513a93142100dbce6" exitCode=0 Nov 24 08:11:11 crc kubenswrapper[4691]: I1124 08:11:11.221593 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psr85" event={"ID":"81f22002-1443-4393-8d6d-22db227fc480","Type":"ContainerDied","Data":"d39ee9682f21753d63314af05359e82362fca0cb84e788d513a93142100dbce6"} Nov 24 08:11:11 crc kubenswrapper[4691]: I1124 08:11:11.226871 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" event={"ID":"603e76a3-8258-43ec-850b-d2c34845cd8b","Type":"ContainerStarted","Data":"4c141b5ba7e2ea635ef42b02f8e75e853cb58d98e5d9c6dc98a0fcbc843cd030"} Nov 24 08:11:11 crc kubenswrapper[4691]: I1124 08:11:11.226909 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:11:11 crc kubenswrapper[4691]: I1124 08:11:11.230043 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g" 
event={"ID":"f8a9119f-fc7e-4bb6-89da-91f7655c633d","Type":"ContainerStarted","Data":"69501b087608abe5405002164baddd567f61cb789a4c6a5117d890761839420c"} Nov 24 08:11:11 crc kubenswrapper[4691]: I1124 08:11:11.247916 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" event={"ID":"c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7","Type":"ContainerStarted","Data":"df7d418694710c0440015a58fa0fd6dc8e67f3e94f99172128a01103ce5282c1"} Nov 24 08:11:11 crc kubenswrapper[4691]: I1124 08:11:11.261970 4691 generic.go:334] "Generic (PLEG): container finished" podID="c145a1b2-551b-4166-8f18-9e0612d384de" containerID="bb57246081c06b1e65313b8d8c9b551fa896642ac3862acc6f6e7b6d4561f09f" exitCode=0 Nov 24 08:11:11 crc kubenswrapper[4691]: I1124 08:11:11.262015 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m7pj8" event={"ID":"c145a1b2-551b-4166-8f18-9e0612d384de","Type":"ContainerDied","Data":"bb57246081c06b1e65313b8d8c9b551fa896642ac3862acc6f6e7b6d4561f09f"} Nov 24 08:11:11 crc kubenswrapper[4691]: I1124 08:11:11.286508 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" podStartSLOduration=28.286490396 podStartE2EDuration="28.286490396s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:11:11.285151877 +0000 UTC m=+833.284101136" watchObservedRunningTime="2025-11-24 08:11:11.286490396 +0000 UTC m=+833.285439635" Nov 24 08:11:12 crc kubenswrapper[4691]: I1124 08:11:12.283356 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" event={"ID":"0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2","Type":"ContainerStarted","Data":"3da2c112d67f2adfc888b4bfd61bbd4d8dec8108ae9645cf51881ce3ca76180d"} Nov 24 08:11:12 crc kubenswrapper[4691]: I1124 08:11:12.286162 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" event={"ID":"0eb9999f-a946-4946-83e0-6cbf7be82741","Type":"ContainerStarted","Data":"b6047d90df160446c2660d12b83cd437b2caf4cb7610f94bc1051b04a4cf288a"} Nov 24 08:11:12 crc kubenswrapper[4691]: E1124 08:11:12.728803 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v" podUID="bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e" Nov 24 08:11:12 crc kubenswrapper[4691]: E1124 08:11:12.878821 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275" podUID="be284da4-49c2-4967-a810-eb5dbece93a3" Nov 24 08:11:13 crc kubenswrapper[4691]: E1124 08:11:13.127657 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w" podUID="f46c7222-cbb0-457d-bb11-15d8cb855c8b" Nov 24 08:11:13 crc 
kubenswrapper[4691]: I1124 08:11:13.316134 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x" event={"ID":"24f62db2-c526-493e-a703-43a661ea0228","Type":"ContainerStarted","Data":"5e70b02bd149ddaca82db4cc507127c89eb7db0a8bcff6e988446b478db1dcce"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.316555 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x" Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.319218 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4" event={"ID":"f4138dbf-cfaf-4a82-bf69-d6065584d1ba","Type":"ContainerStarted","Data":"7d449a266d74b92e35c85ca01d0ceb845ab4f3fb145b5b407688889fe718c25a"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.319282 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4" Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.321033 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x" Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.324710 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw" event={"ID":"f3bb505d-02c4-49ec-94c5-a349cb5a4468","Type":"ContainerStarted","Data":"0514fe4c2da332192fa18979239f08abb4b8e2ae4760277a541f0b5f110abe02"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.328943 4691 generic.go:334] "Generic (PLEG): container finished" podID="81f22002-1443-4393-8d6d-22db227fc480" containerID="65b14e24203b0fb0f767df422884fe5364325bc0b4a39be7bbaf815ea23c78f4" exitCode=0 Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.329020 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psr85" event={"ID":"81f22002-1443-4393-8d6d-22db227fc480","Type":"ContainerDied","Data":"65b14e24203b0fb0f767df422884fe5364325bc0b4a39be7bbaf815ea23c78f4"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.332221 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" event={"ID":"7e82629b-ee44-488b-bdd3-58f078070f7e","Type":"ContainerStarted","Data":"5bff4b7147694a7f9ce8697e544b0d5da6ffa8dabd68b9e79f620ef610aca81e"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.332626 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.341360 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" event={"ID":"63c87b6f-c210-4837-bde9-87436a88578f","Type":"ContainerStarted","Data":"065b0b7db68f88e07b5a8f7683b7593d9cef1e197efa695e1bb4034c5c9d24aa"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.349874 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" event={"ID":"b9f37eec-f8fc-4083-b29a-4e704c802c8a","Type":"ContainerStarted","Data":"ed4bc4b4ba6ef56a4810869c119c781bd3c88dca05588c1245aa8fe4962dff03"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 
08:11:13.352412 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-ncq2x" podStartSLOduration=3.722040474 podStartE2EDuration="30.352385491s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:45.89922647 +0000 UTC m=+807.898175719" lastFinishedPulling="2025-11-24 08:11:12.529571477 +0000 UTC m=+834.528520736" observedRunningTime="2025-11-24 08:11:13.346934103 +0000 UTC m=+835.345883372" watchObservedRunningTime="2025-11-24 08:11:13.352385491 +0000 UTC m=+835.351334740" Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.361108 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs" event={"ID":"22fec998-136d-4bc0-9db1-1e4ac6e1107c","Type":"ContainerStarted","Data":"9d50f4a6602cab816910ede25326c649d91442a7c532b6186dd229ceee914ad3"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.362025 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs" Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.363798 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" event={"ID":"345576fd-a4cd-4c76-8c81-3669a42be294","Type":"ContainerStarted","Data":"eee23c6a8c43d203651b1a81cf60de8412e84e8644568a1c4bd28faf179e03f8"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.366920 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w" event={"ID":"f46c7222-cbb0-457d-bb11-15d8cb855c8b","Type":"ContainerStarted","Data":"bdc88dbd8c369fcd23b75d714083f74eee8c63bb5f3816ab78cd7a95ec89b3b1"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.373281 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs" Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.378184 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" podStartSLOduration=3.835621294 podStartE2EDuration="30.378168438s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:45.996854258 +0000 UTC m=+807.995803497" lastFinishedPulling="2025-11-24 08:11:12.539401382 +0000 UTC m=+834.538350641" observedRunningTime="2025-11-24 08:11:13.37686789 +0000 UTC m=+835.375817139" watchObservedRunningTime="2025-11-24 08:11:13.378168438 +0000 UTC m=+835.377117687" Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.392512 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275" event={"ID":"be284da4-49c2-4967-a810-eb5dbece93a3","Type":"ContainerStarted","Data":"26226b0d1003514e3dd369e92ee3250978ba99dcb0431fa5629eadb182a29fe1"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.406860 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v" event={"ID":"bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e","Type":"ContainerStarted","Data":"db902a0f0b83644c926bf63da2345cbd7d36f9934e820169ed4839666e9d45c2"} Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.439734 4691 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-65lbw" podStartSLOduration=6.160983134 podStartE2EDuration="29.439717071s" podCreationTimestamp="2025-11-24 08:10:44 +0000 UTC" firstStartedPulling="2025-11-24 08:10:46.365703357 +0000 UTC m=+808.364652606" lastFinishedPulling="2025-11-24 08:11:09.644437304 +0000 UTC m=+831.643386543" observedRunningTime="2025-11-24 08:11:13.434543481 +0000 UTC m=+835.433492740" watchObservedRunningTime="2025-11-24 08:11:13.439717071 +0000 UTC m=+835.438666320" Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.492046 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4" podStartSLOduration=3.870086283 podStartE2EDuration="30.492007946s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:45.919170188 +0000 UTC m=+807.918119437" lastFinishedPulling="2025-11-24 08:11:12.541091851 +0000 UTC m=+834.540041100" observedRunningTime="2025-11-24 08:11:13.463015176 +0000 UTC m=+835.461964435" watchObservedRunningTime="2025-11-24 08:11:13.492007946 +0000 UTC m=+835.490957195" Nov 24 08:11:13 crc kubenswrapper[4691]: I1124 08:11:13.561539 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ppdhs" podStartSLOduration=4.370750183 podStartE2EDuration="31.56150305s" podCreationTimestamp="2025-11-24 08:10:42 +0000 UTC" firstStartedPulling="2025-11-24 08:10:45.40402446 +0000 UTC m=+807.402973709" lastFinishedPulling="2025-11-24 08:11:12.594777317 +0000 UTC m=+834.593726576" observedRunningTime="2025-11-24 08:11:13.55218491 +0000 UTC m=+835.551134159" watchObservedRunningTime="2025-11-24 08:11:13.56150305 +0000 UTC m=+835.560452299" Nov 24 08:11:14 crc kubenswrapper[4691]: E1124 08:11:14.130772 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk" podUID="39df322c-3527-4b0d-a719-4ecbfa944a56" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.417860 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v" event={"ID":"bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e","Type":"ContainerStarted","Data":"8e86882e8a806dbfc66b823375e6e5375716c1c2dfffb01bdc60f00e063d1ea8"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.440573 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.440646 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.440681 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.440693 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-4czcs" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.440704 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4" event={"ID":"132ed997-05f1-4484-a11a-3e282b0e889b","Type":"ContainerStarted","Data":"26b4e442210466b72a4ea4e4af36317130f7131f337056c05c1b1cec047a6096"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.440724 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" event={"ID":"c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7","Type":"ContainerStarted","Data":"6f25be4663d2fcfc65c30611aa056f9395a50f67a3f948e55647c28522c2d193"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.440736 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-4czcs" event={"ID":"ccc21638-592f-4e4f-87df-f95f79a5c23e","Type":"ContainerStarted","Data":"618f566bceaf0a3ef8fdabb33653174a778ee5fc9f22e4e403402403afebcbbc"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.442665 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275" event={"ID":"be284da4-49c2-4967-a810-eb5dbece93a3","Type":"ContainerStarted","Data":"5822d1d855a9348526bfc22f663b03d0836e7821bfed27e2faf77f39dbbb8ec2"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.443532 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.455639 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" event={"ID":"0eb9999f-a946-4946-83e0-6cbf7be82741","Type":"ContainerStarted","Data":"f0012c7b3f2a7ed612c8d1df2051187b6168cb5de0bc268d72107e87569a6bd4"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.456118 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.464195 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.464614 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-4czcs" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.474933 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr" event={"ID":"c2acb14d-547e-4528-addc-5bb388370b04","Type":"ContainerStarted","Data":"229889b65697b068e479965b2bdb97195e4aa7cf151292692d7ed135b7c10bfc"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.478802 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.488573 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w" event={"ID":"f46c7222-cbb0-457d-bb11-15d8cb855c8b","Type":"ContainerStarted","Data":"dcd2e7fe779c2bcdd4f04a3a4e8c639cc91de5084b032c06efb478fc0b6b357f"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.489698 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.495341 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v" podStartSLOduration=3.872279466 podStartE2EDuration="31.49532235s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:46.322620098 +0000 UTC m=+808.321569347" lastFinishedPulling="2025-11-24 08:11:13.945662982 +0000 UTC m=+835.944612231" observedRunningTime="2025-11-24 08:11:14.463271981 +0000 UTC m=+836.462221240" watchObservedRunningTime="2025-11-24 08:11:14.49532235 +0000 UTC m=+836.494271599" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.496067 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.517754 4691 generic.go:334] "Generic (PLEG): container finished" podID="c145a1b2-551b-4166-8f18-9e0612d384de" containerID="09fb0482ffec3a953dd2f81ec379cd4dda32a5499c37245626a39b770d118049" exitCode=0 Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.518339 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m7pj8" event={"ID":"c145a1b2-551b-4166-8f18-9e0612d384de","Type":"ContainerDied","Data":"09fb0482ffec3a953dd2f81ec379cd4dda32a5499c37245626a39b770d118049"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.531001 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" event={"ID":"0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2","Type":"ContainerStarted","Data":"6f696361a10333c87c51618eca52d4f8b7cfbb8b0fd96dbd66f15d0f50fc2f39"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.532646 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.537431 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-6jgx4" podStartSLOduration=4.081416517 podStartE2EDuration="31.537399559s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:45.421127046 +0000 UTC m=+807.420076285" lastFinishedPulling="2025-11-24 08:11:12.877110078 +0000 UTC m=+834.876059327" observedRunningTime="2025-11-24 08:11:14.531037245 +0000 UTC m=+836.529986484" watchObservedRunningTime="2025-11-24 08:11:14.537399559 +0000 UTC m=+836.536348808" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.546326 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" podStartSLOduration=4.871577454 podStartE2EDuration="31.546293097s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:46.380713862 +0000 UTC m=+808.379663111" lastFinishedPulling="2025-11-24 08:11:13.055429505 +0000 UTC m=+835.054378754" observedRunningTime="2025-11-24 08:11:14.506750151 +0000 UTC m=+836.505699420" watchObservedRunningTime="2025-11-24 08:11:14.546293097 +0000 UTC m=+836.545242346" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.548989 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g" event={"ID":"f8a9119f-fc7e-4bb6-89da-91f7655c633d","Type":"ContainerStarted","Data":"2f411a18b5deaa2a365e15d750dbf2529816a337e3d67c6faa909becafa417b6"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.549085 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.559846 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk" event={"ID":"1c460dd6-5f3d-4eae-9436-c46ccd900674","Type":"ContainerStarted","Data":"55cd58df6228705e140a7ed743bacedc326ad4a14039e4b39db2aa280b1ade76"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.561245 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.567704 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.577013 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psr85" event={"ID":"81f22002-1443-4393-8d6d-22db227fc480","Type":"ContainerStarted","Data":"009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.581591 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" event={"ID":"345576fd-a4cd-4c76-8c81-3669a42be294","Type":"ContainerStarted","Data":"373c4c3efe6e9b3017ad859fe6b111fc06abd4f5afb2623ea708220bbab152da"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.582370 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.591035 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw" event={"ID":"66685e8a-e196-444b-9149-e7861ff2c8b5","Type":"ContainerStarted","Data":"0b33c082760b1ef3954b4e57276fcba24d730ee0e21acb10c9bd426bd91049b3"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.592211 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.605613 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk" event={"ID":"39df322c-3527-4b0d-a719-4ecbfa944a56","Type":"ContainerStarted","Data":"e2d34afc6a0741d7506d9da37bbcebc7867590f943666aca25d2d681437a8630"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.605723 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.609671 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-cql69" 
event={"ID":"df3746c8-ec8b-406e-b2f5-7bd93dd46646","Type":"ContainerStarted","Data":"8ef57640a5aa5c44e5e36941960b3ea77b700ad7c8b15ac2ede2d8db369966c4"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.611524 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-cql69" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.629182 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-cql69" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.630425 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-clqqr" podStartSLOduration=4.970534809 podStartE2EDuration="31.630393023s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:45.994716076 +0000 UTC m=+807.993665325" lastFinishedPulling="2025-11-24 08:11:12.65457429 +0000 UTC m=+834.653523539" observedRunningTime="2025-11-24 08:11:14.611302851 +0000 UTC m=+836.610252100" watchObservedRunningTime="2025-11-24 08:11:14.630393023 +0000 UTC m=+836.629342272" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.631051 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" event={"ID":"63c87b6f-c210-4837-bde9-87436a88578f","Type":"ContainerStarted","Data":"f13b54db9a0347ba8451dc01e4572be2b216372a5c35cd1ba6b3e76be41b23dd"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.631771 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.658515 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" event={"ID":"b9f37eec-f8fc-4083-b29a-4e704c802c8a","Type":"ContainerStarted","Data":"9f7fb949a59a613c230405361d285d3e7a406680617b47b6b67a9e59fbd391bc"} Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.658734 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.687887 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-v7vtk" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.694621 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275" podStartSLOduration=4.006900357 podStartE2EDuration="31.694595954s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:46.255411741 +0000 UTC m=+808.254360990" lastFinishedPulling="2025-11-24 08:11:13.943107338 +0000 UTC m=+835.942056587" observedRunningTime="2025-11-24 08:11:14.658141467 +0000 UTC m=+836.657090716" watchObservedRunningTime="2025-11-24 08:11:14.694595954 +0000 UTC m=+836.693545203" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.695041 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th" podStartSLOduration=5.052159705 podStartE2EDuration="31.695037386s" 
podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:46.38135111 +0000 UTC m=+808.380300359" lastFinishedPulling="2025-11-24 08:11:13.024228781 +0000 UTC m=+835.023178040" observedRunningTime="2025-11-24 08:11:14.693938305 +0000 UTC m=+836.692887554" watchObservedRunningTime="2025-11-24 08:11:14.695037386 +0000 UTC m=+836.693986635" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.727207 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cb74df96-4czcs" podStartSLOduration=4.890598474 podStartE2EDuration="31.727181158s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:46.332102733 +0000 UTC m=+808.331051982" lastFinishedPulling="2025-11-24 08:11:13.168685417 +0000 UTC m=+835.167634666" observedRunningTime="2025-11-24 08:11:14.723061979 +0000 UTC m=+836.722011228" watchObservedRunningTime="2025-11-24 08:11:14.727181158 +0000 UTC m=+836.726130407" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.751590 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w" podStartSLOduration=3.974782736 podStartE2EDuration="31.751566394s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:45.891770964 +0000 UTC m=+807.890720213" lastFinishedPulling="2025-11-24 08:11:13.668554622 +0000 UTC m=+835.667503871" observedRunningTime="2025-11-24 08:11:14.748481495 +0000 UTC m=+836.747430744" watchObservedRunningTime="2025-11-24 08:11:14.751566394 +0000 UTC m=+836.750515643" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.804178 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m" podStartSLOduration=7.946404673 podStartE2EDuration="31.804159289s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:46.372554085 +0000 UTC m=+808.371503334" lastFinishedPulling="2025-11-24 08:11:10.230308701 +0000 UTC m=+832.229257950" observedRunningTime="2025-11-24 08:11:14.803820559 +0000 UTC m=+836.802769818" watchObservedRunningTime="2025-11-24 08:11:14.804159289 +0000 UTC m=+836.803108538" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.891486 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" podStartSLOduration=24.828019917 podStartE2EDuration="31.891453418s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:11:03.351690116 +0000 UTC m=+825.350639365" lastFinishedPulling="2025-11-24 08:11:10.415123617 +0000 UTC m=+832.414072866" observedRunningTime="2025-11-24 08:11:14.885505596 +0000 UTC m=+836.884454845" watchObservedRunningTime="2025-11-24 08:11:14.891453418 +0000 UTC m=+836.890402657" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.952141 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g" podStartSLOduration=4.271773792 podStartE2EDuration="31.952111316s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:45.400300012 +0000 UTC m=+807.399249261" lastFinishedPulling="2025-11-24 08:11:13.080637536 +0000 UTC m=+835.079586785" observedRunningTime="2025-11-24 08:11:14.950388606 +0000 
UTC m=+836.949337875" watchObservedRunningTime="2025-11-24 08:11:14.952111316 +0000 UTC m=+836.951060565" Nov 24 08:11:14 crc kubenswrapper[4691]: I1124 08:11:14.958823 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-774b86978c-cql69" podStartSLOduration=4.375406125 podStartE2EDuration="31.958791499s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:45.266518526 +0000 UTC m=+807.265467775" lastFinishedPulling="2025-11-24 08:11:12.8499039 +0000 UTC m=+834.848853149" observedRunningTime="2025-11-24 08:11:14.9246464 +0000 UTC m=+836.923595659" watchObservedRunningTime="2025-11-24 08:11:14.958791499 +0000 UTC m=+836.957740748" Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.013544 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-psr85" podStartSLOduration=25.466460727 podStartE2EDuration="28.013523485s" podCreationTimestamp="2025-11-24 08:10:47 +0000 UTC" firstStartedPulling="2025-11-24 08:11:11.395989129 +0000 UTC m=+833.394938378" lastFinishedPulling="2025-11-24 08:11:13.943051887 +0000 UTC m=+835.942001136" observedRunningTime="2025-11-24 08:11:14.994150534 +0000 UTC m=+836.993099783" watchObservedRunningTime="2025-11-24 08:11:15.013523485 +0000 UTC m=+837.012472734" Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.017429 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-8tqbw" podStartSLOduration=5.115796419 podStartE2EDuration="32.017423818s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:45.878942032 +0000 UTC m=+807.877891281" lastFinishedPulling="2025-11-24 08:11:12.780569431 +0000 UTC m=+834.779518680" observedRunningTime="2025-11-24 08:11:15.012219608 +0000 UTC m=+837.011168857" watchObservedRunningTime="2025-11-24 08:11:15.017423818 +0000 UTC m=+837.016373067" Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.039948 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6" podStartSLOduration=5.394712351 podStartE2EDuration="32.03992216s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:46.379011762 +0000 UTC m=+808.377961021" lastFinishedPulling="2025-11-24 08:11:13.024221581 +0000 UTC m=+835.023170830" observedRunningTime="2025-11-24 08:11:15.034133683 +0000 UTC m=+837.033082952" watchObservedRunningTime="2025-11-24 08:11:15.03992216 +0000 UTC m=+837.038871409" Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.107904 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-sr8nk" podStartSLOduration=5.212463121 podStartE2EDuration="32.10788647s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:46.00244151 +0000 UTC m=+808.001413060" lastFinishedPulling="2025-11-24 08:11:12.89788716 +0000 UTC m=+834.896836409" observedRunningTime="2025-11-24 08:11:15.104692817 +0000 UTC m=+837.103642066" watchObservedRunningTime="2025-11-24 08:11:15.10788647 +0000 UTC m=+837.106835719" Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.139050 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb" podStartSLOduration=8.860987545 podStartE2EDuration="32.139028202s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:46.366268793 +0000 UTC m=+808.365218042" lastFinishedPulling="2025-11-24 08:11:09.64430945 +0000 UTC m=+831.643258699" observedRunningTime="2025-11-24 08:11:15.13413741 +0000 UTC m=+837.133086659" watchObservedRunningTime="2025-11-24 08:11:15.139028202 +0000 UTC m=+837.137977451" Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.669562 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk" event={"ID":"39df322c-3527-4b0d-a719-4ecbfa944a56","Type":"ContainerStarted","Data":"843f99b323fc71542f84f97ac6996e1a1e98ce9aa72c20bcbd623ed11333b5d1"} Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.670158 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk" Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.674832 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m7pj8" event={"ID":"c145a1b2-551b-4166-8f18-9e0612d384de","Type":"ContainerStarted","Data":"88745384ecbcc6e6ac0b8036cdfadf9a006c6d08b8efe3e48b2615d08faa2acb"} Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.678433 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-nfx6g" Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.679116 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wmpvm" Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.695404 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk" podStartSLOduration=3.537816134 podStartE2EDuration="32.695383164s" podCreationTimestamp="2025-11-24 08:10:43 +0000 UTC" firstStartedPulling="2025-11-24 08:10:46.002349747 +0000 UTC m=+808.001298997" lastFinishedPulling="2025-11-24 08:11:15.159916778 +0000 UTC m=+837.158866027" observedRunningTime="2025-11-24 08:11:15.691206193 +0000 UTC m=+837.690155452" watchObservedRunningTime="2025-11-24 08:11:15.695383164 +0000 UTC m=+837.694332413" Nov 24 08:11:15 crc kubenswrapper[4691]: I1124 08:11:15.721053 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-m7pj8" podStartSLOduration=15.032129222 podStartE2EDuration="18.721025237s" podCreationTimestamp="2025-11-24 08:10:57 +0000 UTC" firstStartedPulling="2025-11-24 08:11:11.39465529 +0000 UTC m=+833.393604539" lastFinishedPulling="2025-11-24 08:11:15.083551305 +0000 UTC m=+837.082500554" observedRunningTime="2025-11-24 08:11:15.720118191 +0000 UTC m=+837.719067450" watchObservedRunningTime="2025-11-24 08:11:15.721025237 +0000 UTC m=+837.719974486" Nov 24 08:11:17 crc kubenswrapper[4691]: I1124 08:11:17.650234 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d" Nov 24 08:11:17 crc kubenswrapper[4691]: I1124 08:11:17.951539 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/openstack-operator-controller-manager-7888ffcffd-8jst5" Nov 24 08:11:17 crc kubenswrapper[4691]: I1124 08:11:17.994438 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:11:17 crc kubenswrapper[4691]: I1124 08:11:17.994509 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:11:18 crc kubenswrapper[4691]: I1124 08:11:18.028669 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-psr85" Nov 24 08:11:18 crc kubenswrapper[4691]: I1124 08:11:18.028789 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-psr85" Nov 24 08:11:18 crc kubenswrapper[4691]: I1124 08:11:18.055717 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-m7pj8" Nov 24 08:11:18 crc kubenswrapper[4691]: I1124 08:11:18.089159 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-psr85" Nov 24 08:11:19 crc kubenswrapper[4691]: I1124 08:11:19.753485 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-psr85" Nov 24 08:11:20 crc kubenswrapper[4691]: I1124 08:11:20.712125 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-psr85"] Nov 24 08:11:21 crc kubenswrapper[4691]: I1124 08:11:21.721715 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-psr85" podUID="81f22002-1443-4393-8d6d-22db227fc480" containerName="registry-server" containerID="cri-o://009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102" gracePeriod=2 Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.182106 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-psr85"
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.337783 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7tjh\" (UniqueName: \"kubernetes.io/projected/81f22002-1443-4393-8d6d-22db227fc480-kube-api-access-d7tjh\") pod \"81f22002-1443-4393-8d6d-22db227fc480\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") "
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.337960 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-utilities\") pod \"81f22002-1443-4393-8d6d-22db227fc480\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") "
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.338118 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-catalog-content\") pod \"81f22002-1443-4393-8d6d-22db227fc480\" (UID: \"81f22002-1443-4393-8d6d-22db227fc480\") "
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.339539 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-utilities" (OuterVolumeSpecName: "utilities") pod "81f22002-1443-4393-8d6d-22db227fc480" (UID: "81f22002-1443-4393-8d6d-22db227fc480"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.344790 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81f22002-1443-4393-8d6d-22db227fc480-kube-api-access-d7tjh" (OuterVolumeSpecName: "kube-api-access-d7tjh") pod "81f22002-1443-4393-8d6d-22db227fc480" (UID: "81f22002-1443-4393-8d6d-22db227fc480"). InnerVolumeSpecName "kube-api-access-d7tjh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.397847 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "81f22002-1443-4393-8d6d-22db227fc480" (UID: "81f22002-1443-4393-8d6d-22db227fc480"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.440599 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.440644 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81f22002-1443-4393-8d6d-22db227fc480-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.440659 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7tjh\" (UniqueName: \"kubernetes.io/projected/81f22002-1443-4393-8d6d-22db227fc480-kube-api-access-d7tjh\") on node \"crc\" DevicePath \"\""
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.736998 4691 generic.go:334] "Generic (PLEG): container finished" podID="81f22002-1443-4393-8d6d-22db227fc480" containerID="009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102" exitCode=0
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.737062 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psr85" event={"ID":"81f22002-1443-4393-8d6d-22db227fc480","Type":"ContainerDied","Data":"009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102"}
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.737096 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-psr85"
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.737123 4691 scope.go:117] "RemoveContainer" containerID="009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102"
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.737106 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psr85" event={"ID":"81f22002-1443-4393-8d6d-22db227fc480","Type":"ContainerDied","Data":"cca9693a71910b1d30d5c43fc1a449c78a981dd5ffca8bd4ba37a91d8ce1416e"}
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.768131 4691 scope.go:117] "RemoveContainer" containerID="65b14e24203b0fb0f767df422884fe5364325bc0b4a39be7bbaf815ea23c78f4"
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.790175 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-psr85"]
Nov 24 08:11:22 crc kubenswrapper[4691]: I1124 08:11:22.797443 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-psr85"]
Nov 24 08:11:23 crc kubenswrapper[4691]: I1124 08:11:23.699050 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-2w275"
Nov 24 08:11:23 crc kubenswrapper[4691]: I1124 08:11:23.721978 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-f7g9v"
Nov 24 08:11:23 crc kubenswrapper[4691]: I1124 08:11:23.841404 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jctfk"
Nov 24 08:11:23 crc kubenswrapper[4691]: I1124 08:11:23.861575 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-vnlb4"
Nov 24 08:11:23 crc kubenswrapper[4691]: I1124 08:11:23.937236 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bh7th"
Nov 24 08:11:24 crc kubenswrapper[4691]: I1124 08:11:24.061668 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-554b4f8994-dck8w"
Nov 24 08:11:24 crc kubenswrapper[4691]: I1124 08:11:24.342897 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-sn2x6"
Nov 24 08:11:24 crc kubenswrapper[4691]: I1124 08:11:24.362623 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kp2bb"
Nov 24 08:11:24 crc kubenswrapper[4691]: I1124 08:11:24.581521 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-8qh9m"
Nov 24 08:11:24 crc kubenswrapper[4691]: I1124 08:11:24.772177 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81f22002-1443-4393-8d6d-22db227fc480" path="/var/lib/kubelet/pods/81f22002-1443-4393-8d6d-22db227fc480/volumes"
Nov 24 08:11:25 crc kubenswrapper[4691]: I1124 08:11:25.069151 4691 scope.go:117] "RemoveContainer" containerID="d39ee9682f21753d63314af05359e82362fca0cb84e788d513a93142100dbce6"
Nov 24 08:11:25 crc kubenswrapper[4691]: I1124 08:11:25.091269 4691 scope.go:117] "RemoveContainer" containerID="009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102"
Nov 24 08:11:25 crc kubenswrapper[4691]: E1124 08:11:25.092028 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102\": container with ID starting with 009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102 not found: ID does not exist" containerID="009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102"
Nov 24 08:11:25 crc kubenswrapper[4691]: I1124 08:11:25.092092 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102"} err="failed to get container status \"009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102\": rpc error: code = NotFound desc = could not find container \"009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102\": container with ID starting with 009ba744218d7d7a0143ed630cc1dde2cd228278bbe8208c9c6e4ed64854f102 not found: ID does not exist"
Nov 24 08:11:25 crc kubenswrapper[4691]: I1124 08:11:25.092132 4691 scope.go:117] "RemoveContainer" containerID="65b14e24203b0fb0f767df422884fe5364325bc0b4a39be7bbaf815ea23c78f4"
Nov 24 08:11:25 crc kubenswrapper[4691]: E1124 08:11:25.092722 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65b14e24203b0fb0f767df422884fe5364325bc0b4a39be7bbaf815ea23c78f4\": container with ID starting with 65b14e24203b0fb0f767df422884fe5364325bc0b4a39be7bbaf815ea23c78f4 not found: ID does not exist" containerID="65b14e24203b0fb0f767df422884fe5364325bc0b4a39be7bbaf815ea23c78f4"
Nov 24 08:11:25 crc kubenswrapper[4691]: I1124 08:11:25.092765 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65b14e24203b0fb0f767df422884fe5364325bc0b4a39be7bbaf815ea23c78f4"} err="failed to get container status \"65b14e24203b0fb0f767df422884fe5364325bc0b4a39be7bbaf815ea23c78f4\": rpc error: code = NotFound desc = could not find container \"65b14e24203b0fb0f767df422884fe5364325bc0b4a39be7bbaf815ea23c78f4\": container with ID starting with 65b14e24203b0fb0f767df422884fe5364325bc0b4a39be7bbaf815ea23c78f4 not found: ID does not exist"
Nov 24 08:11:25 crc kubenswrapper[4691]: I1124 08:11:25.092798 4691 scope.go:117] "RemoveContainer" containerID="d39ee9682f21753d63314af05359e82362fca0cb84e788d513a93142100dbce6"
Nov 24 08:11:25 crc kubenswrapper[4691]: E1124 08:11:25.093306 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d39ee9682f21753d63314af05359e82362fca0cb84e788d513a93142100dbce6\": container with ID starting with d39ee9682f21753d63314af05359e82362fca0cb84e788d513a93142100dbce6 not found: ID does not exist" containerID="d39ee9682f21753d63314af05359e82362fca0cb84e788d513a93142100dbce6"
Nov 24 08:11:25 crc kubenswrapper[4691]: I1124 08:11:25.093332 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d39ee9682f21753d63314af05359e82362fca0cb84e788d513a93142100dbce6"} err="failed to get container status \"d39ee9682f21753d63314af05359e82362fca0cb84e788d513a93142100dbce6\": rpc error: code = NotFound desc = could not find container \"d39ee9682f21753d63314af05359e82362fca0cb84e788d513a93142100dbce6\": container with ID starting with d39ee9682f21753d63314af05359e82362fca0cb84e788d513a93142100dbce6 not found: ID does not exist"
Nov 24 08:11:28 crc kubenswrapper[4691]: I1124 08:11:28.047515 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-m7pj8"
Nov 24 08:11:28 crc kubenswrapper[4691]: I1124 08:11:28.100719 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m7pj8"]
Nov 24 08:11:28 crc kubenswrapper[4691]: I1124 08:11:28.808570 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-m7pj8" podUID="c145a1b2-551b-4166-8f18-9e0612d384de" containerName="registry-server" containerID="cri-o://88745384ecbcc6e6ac0b8036cdfadf9a006c6d08b8efe3e48b2615d08faa2acb" gracePeriod=2
Nov 24 08:11:29 crc kubenswrapper[4691]: I1124 08:11:29.822728 4691 generic.go:334] "Generic (PLEG): container finished" podID="c145a1b2-551b-4166-8f18-9e0612d384de" containerID="88745384ecbcc6e6ac0b8036cdfadf9a006c6d08b8efe3e48b2615d08faa2acb" exitCode=0
Nov 24 08:11:29 crc kubenswrapper[4691]: I1124 08:11:29.822822 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m7pj8" event={"ID":"c145a1b2-551b-4166-8f18-9e0612d384de","Type":"ContainerDied","Data":"88745384ecbcc6e6ac0b8036cdfadf9a006c6d08b8efe3e48b2615d08faa2acb"}
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.494693 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m7pj8"
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.688737 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tbgg\" (UniqueName: \"kubernetes.io/projected/c145a1b2-551b-4166-8f18-9e0612d384de-kube-api-access-2tbgg\") pod \"c145a1b2-551b-4166-8f18-9e0612d384de\" (UID: \"c145a1b2-551b-4166-8f18-9e0612d384de\") "
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.688820 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-catalog-content\") pod \"c145a1b2-551b-4166-8f18-9e0612d384de\" (UID: \"c145a1b2-551b-4166-8f18-9e0612d384de\") "
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.688896 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-utilities\") pod \"c145a1b2-551b-4166-8f18-9e0612d384de\" (UID: \"c145a1b2-551b-4166-8f18-9e0612d384de\") "
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.689643 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-utilities" (OuterVolumeSpecName: "utilities") pod "c145a1b2-551b-4166-8f18-9e0612d384de" (UID: "c145a1b2-551b-4166-8f18-9e0612d384de"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.696171 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c145a1b2-551b-4166-8f18-9e0612d384de-kube-api-access-2tbgg" (OuterVolumeSpecName: "kube-api-access-2tbgg") pod "c145a1b2-551b-4166-8f18-9e0612d384de" (UID: "c145a1b2-551b-4166-8f18-9e0612d384de"). InnerVolumeSpecName "kube-api-access-2tbgg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.714288 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c145a1b2-551b-4166-8f18-9e0612d384de" (UID: "c145a1b2-551b-4166-8f18-9e0612d384de"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.790802 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tbgg\" (UniqueName: \"kubernetes.io/projected/c145a1b2-551b-4166-8f18-9e0612d384de-kube-api-access-2tbgg\") on node \"crc\" DevicePath \"\""
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.790863 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.790876 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c145a1b2-551b-4166-8f18-9e0612d384de-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.842670 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m7pj8" event={"ID":"c145a1b2-551b-4166-8f18-9e0612d384de","Type":"ContainerDied","Data":"ba6b6e33b7ad8dae91c77df824e54e566ee7d35cbcb7dc81e753525351e854b7"}
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.842782 4691 scope.go:117] "RemoveContainer" containerID="88745384ecbcc6e6ac0b8036cdfadf9a006c6d08b8efe3e48b2615d08faa2acb"
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.842807 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m7pj8"
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.878497 4691 scope.go:117] "RemoveContainer" containerID="09fb0482ffec3a953dd2f81ec379cd4dda32a5499c37245626a39b770d118049"
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.882828 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m7pj8"]
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.886909 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-m7pj8"]
Nov 24 08:11:31 crc kubenswrapper[4691]: I1124 08:11:31.904910 4691 scope.go:117] "RemoveContainer" containerID="bb57246081c06b1e65313b8d8c9b551fa896642ac3862acc6f6e7b6d4561f09f"
Nov 24 08:11:32 crc kubenswrapper[4691]: I1124 08:11:32.772739 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c145a1b2-551b-4166-8f18-9e0612d384de" path="/var/lib/kubelet/pods/c145a1b2-551b-4166-8f18-9e0612d384de/volumes"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.100957 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5x2mn"]
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101603 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerName="extract-utilities"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101618 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerName="extract-utilities"
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101637 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101644 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101657 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5492776-03c1-422f-8097-e69c5fbf459b" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101665 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5492776-03c1-422f-8097-e69c5fbf459b" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101679 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c145a1b2-551b-4166-8f18-9e0612d384de" containerName="extract-utilities"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101684 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c145a1b2-551b-4166-8f18-9e0612d384de" containerName="extract-utilities"
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101691 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c145a1b2-551b-4166-8f18-9e0612d384de" containerName="extract-content"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101698 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c145a1b2-551b-4166-8f18-9e0612d384de" containerName="extract-content"
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101711 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c145a1b2-551b-4166-8f18-9e0612d384de" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101716 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c145a1b2-551b-4166-8f18-9e0612d384de" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101733 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81f22002-1443-4393-8d6d-22db227fc480" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101739 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="81f22002-1443-4393-8d6d-22db227fc480" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101751 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81f22002-1443-4393-8d6d-22db227fc480" containerName="extract-utilities"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101757 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="81f22002-1443-4393-8d6d-22db227fc480" containerName="extract-utilities"
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101771 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5492776-03c1-422f-8097-e69c5fbf459b" containerName="extract-content"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101777 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5492776-03c1-422f-8097-e69c5fbf459b" containerName="extract-content"
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101789 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81f22002-1443-4393-8d6d-22db227fc480" containerName="extract-content"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101794 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="81f22002-1443-4393-8d6d-22db227fc480" containerName="extract-content"
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101807 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerName="extract-content"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101813 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerName="extract-content"
Nov 24 08:11:37 crc kubenswrapper[4691]: E1124 08:11:37.101824 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5492776-03c1-422f-8097-e69c5fbf459b" containerName="extract-utilities"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101830 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5492776-03c1-422f-8097-e69c5fbf459b" containerName="extract-utilities"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101968 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5492776-03c1-422f-8097-e69c5fbf459b" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101976 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="29cbcb95-d722-413b-893c-f04bdb5ea301" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.101990 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="c145a1b2-551b-4166-8f18-9e0612d384de" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.102000 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="81f22002-1443-4393-8d6d-22db227fc480" containerName="registry-server"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.102725 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.105940 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.106128 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-5xt7f"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.106247 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.108674 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.111841 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5x2mn"]
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.180972 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-config\") pod \"dnsmasq-dns-675f4bcbfc-5x2mn\" (UID: \"40aec6d7-a3f1-461a-988b-8f87be6ad1a9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.181030 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wbcs\" (UniqueName: \"kubernetes.io/projected/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-kube-api-access-9wbcs\") pod \"dnsmasq-dns-675f4bcbfc-5x2mn\" (UID: \"40aec6d7-a3f1-461a-988b-8f87be6ad1a9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.235674 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7pjkz"]
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.237175 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.246864 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.282306 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-config\") pod \"dnsmasq-dns-78dd6ddcc-7pjkz\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.282371 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x28rc\" (UniqueName: \"kubernetes.io/projected/2ed705ca-195e-4f31-a76d-529ec10e4e2f-kube-api-access-x28rc\") pod \"dnsmasq-dns-78dd6ddcc-7pjkz\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.282399 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-config\") pod \"dnsmasq-dns-675f4bcbfc-5x2mn\" (UID: \"40aec6d7-a3f1-461a-988b-8f87be6ad1a9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.282421 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wbcs\" (UniqueName: \"kubernetes.io/projected/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-kube-api-access-9wbcs\") pod \"dnsmasq-dns-675f4bcbfc-5x2mn\" (UID: \"40aec6d7-a3f1-461a-988b-8f87be6ad1a9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.282440 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-7pjkz\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.283468 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-config\") pod \"dnsmasq-dns-675f4bcbfc-5x2mn\" (UID: \"40aec6d7-a3f1-461a-988b-8f87be6ad1a9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.283766 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7pjkz"]
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.353340 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wbcs\" (UniqueName: \"kubernetes.io/projected/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-kube-api-access-9wbcs\") pod \"dnsmasq-dns-675f4bcbfc-5x2mn\" (UID: \"40aec6d7-a3f1-461a-988b-8f87be6ad1a9\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.388386 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-7pjkz\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.388515 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-config\") pod \"dnsmasq-dns-78dd6ddcc-7pjkz\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.388583 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x28rc\" (UniqueName: \"kubernetes.io/projected/2ed705ca-195e-4f31-a76d-529ec10e4e2f-kube-api-access-x28rc\") pod \"dnsmasq-dns-78dd6ddcc-7pjkz\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.389820 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-7pjkz\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.390525 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-config\") pod \"dnsmasq-dns-78dd6ddcc-7pjkz\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.430482 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x28rc\" (UniqueName: \"kubernetes.io/projected/2ed705ca-195e-4f31-a76d-529ec10e4e2f-kube-api-access-x28rc\") pod \"dnsmasq-dns-78dd6ddcc-7pjkz\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.479467 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.587642 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz"
Nov 24 08:11:37 crc kubenswrapper[4691]: I1124 08:11:37.930849 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5x2mn"]
Nov 24 08:11:38 crc kubenswrapper[4691]: I1124 08:11:38.104423 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7pjkz"]
Nov 24 08:11:38 crc kubenswrapper[4691]: W1124 08:11:38.110819 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ed705ca_195e_4f31_a76d_529ec10e4e2f.slice/crio-2c2ce8038b5c113f203a4930c8950d6d1c2e31fffbd42a3f586af487ae7d8503 WatchSource:0}: Error finding container 2c2ce8038b5c113f203a4930c8950d6d1c2e31fffbd42a3f586af487ae7d8503: Status 404 returned error can't find the container with id 2c2ce8038b5c113f203a4930c8950d6d1c2e31fffbd42a3f586af487ae7d8503
Nov 24 08:11:38 crc kubenswrapper[4691]: I1124 08:11:38.909916 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz" event={"ID":"2ed705ca-195e-4f31-a76d-529ec10e4e2f","Type":"ContainerStarted","Data":"2c2ce8038b5c113f203a4930c8950d6d1c2e31fffbd42a3f586af487ae7d8503"}
Nov 24 08:11:38 crc kubenswrapper[4691]: I1124 08:11:38.912885 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn" event={"ID":"40aec6d7-a3f1-461a-988b-8f87be6ad1a9","Type":"ContainerStarted","Data":"049925ae025ec8003daad907304673e9c5851762fb0a2a3df4742afc862edf8f"}
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.504076 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5x2mn"]
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.579127 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-dv56l"]
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.580578 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.609484 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-dv56l"]
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.745861 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ck2s5\" (UniqueName: \"kubernetes.io/projected/1759554c-7f60-492d-b11b-8cb45b0cc5be-kube-api-access-ck2s5\") pod \"dnsmasq-dns-666b6646f7-dv56l\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") " pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.745968 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-config\") pod \"dnsmasq-dns-666b6646f7-dv56l\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") " pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.746025 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-dns-svc\") pod \"dnsmasq-dns-666b6646f7-dv56l\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") " pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.850275 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-dns-svc\") pod \"dnsmasq-dns-666b6646f7-dv56l\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") " pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.850569 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ck2s5\" (UniqueName: \"kubernetes.io/projected/1759554c-7f60-492d-b11b-8cb45b0cc5be-kube-api-access-ck2s5\") pod \"dnsmasq-dns-666b6646f7-dv56l\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") " pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.850931 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-config\") pod \"dnsmasq-dns-666b6646f7-dv56l\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") " pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.853221 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-config\") pod \"dnsmasq-dns-666b6646f7-dv56l\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") " pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.853374 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-dns-svc\") pod \"dnsmasq-dns-666b6646f7-dv56l\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") " pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.870489 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7pjkz"]
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.906516 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ck2s5\" (UniqueName: \"kubernetes.io/projected/1759554c-7f60-492d-b11b-8cb45b0cc5be-kube-api-access-ck2s5\") pod \"dnsmasq-dns-666b6646f7-dv56l\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") " pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.908875 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.932546 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nxrrf"]
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.934475 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.956838 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-nxrrf\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.956915 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-config\") pod \"dnsmasq-dns-57d769cc4f-nxrrf\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.957138 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrftx\" (UniqueName: \"kubernetes.io/projected/282ca99b-ddc6-4450-8f8c-6a8e40144d35-kube-api-access-nrftx\") pod \"dnsmasq-dns-57d769cc4f-nxrrf\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf"
Nov 24 08:11:40 crc kubenswrapper[4691]: I1124 08:11:40.988028 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nxrrf"]
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.061027 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-nxrrf\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.061108 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-config\") pod \"dnsmasq-dns-57d769cc4f-nxrrf\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.061295 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrftx\" (UniqueName: \"kubernetes.io/projected/282ca99b-ddc6-4450-8f8c-6a8e40144d35-kube-api-access-nrftx\") pod \"dnsmasq-dns-57d769cc4f-nxrrf\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.063208 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-nxrrf\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.063561 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-config\") pod \"dnsmasq-dns-57d769cc4f-nxrrf\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.088004 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrftx\" (UniqueName: \"kubernetes.io/projected/282ca99b-ddc6-4450-8f8c-6a8e40144d35-kube-api-access-nrftx\") pod \"dnsmasq-dns-57d769cc4f-nxrrf\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.275042 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.518093 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-dv56l"]
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.701984 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.708576 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.711447 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-9tm7h"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.714757 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.715022 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.715070 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.715822 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.715840 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.715898 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.718525 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.782356 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nxrrf"]
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.879338 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-config-data\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.879397 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.879423 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6hd2\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-kube-api-access-m6hd2\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.879476 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.879506 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-server-conf\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.879545 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.879565 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/224d72d8-5d0a-48df-8930-2cb28fc1fd93-pod-info\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.879588 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.879629 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.879667 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/224d72d8-5d0a-48df-8930-2cb28fc1fd93-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.879691 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.951740 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-dv56l" event={"ID":"1759554c-7f60-492d-b11b-8cb45b0cc5be","Type":"ContainerStarted","Data":"1375709cda165f61c7695105a18250134e999db7d2885999dd37234941727d35"}
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.981599 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-server-conf\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.981682 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.981707 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/224d72d8-5d0a-48df-8930-2cb28fc1fd93-pod-info\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.981723 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.981760 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.981790 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/224d72d8-5d0a-48df-8930-2cb28fc1fd93-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.981809 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.981834 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-config-data\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.981855 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.981875 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6hd2\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-kube-api-access-m6hd2\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.981902 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.982285 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.983104 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.983826 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.984886 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-config-data\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.985068 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.985199 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-server-conf\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.990725 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/224d72d8-5d0a-48df-8930-2cb28fc1fd93-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.991236 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/224d72d8-5d0a-48df-8930-2cb28fc1fd93-pod-info\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.995468 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:41 crc kubenswrapper[4691]: I1124 08:11:41.995858 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.007057 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6hd2\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-kube-api-access-m6hd2\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.011995 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " pod="openstack/rabbitmq-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.044888 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.063602 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.065262 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.068850 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.069001 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.069166 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.069273 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.070920 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.071240 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.074495 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-6lbs8"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.075524 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.184664 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbpv5\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-kube-api-access-fbpv5\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.184718 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/60038211-87c8-4170-8fd0-35df8a16aa92-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.184751 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.184782 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.184798 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.184820 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.184851 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.184893 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.184913 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.184945 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.184980 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/60038211-87c8-4170-8fd0-35df8a16aa92-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.286754 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.286800 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.286833 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.286871 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/60038211-87c8-4170-8fd0-35df8a16aa92-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.286896 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbpv5\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-kube-api-access-fbpv5\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.286914 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/60038211-87c8-4170-8fd0-35df8a16aa92-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.286937 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.286957 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.286970 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.286990 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.287015 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.287364 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.287580 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.288593 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.288594 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.289171 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.289595 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.291142 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/60038211-87c8-4170-8fd0-35df8a16aa92-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.295194 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.311866 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.311952 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbpv5\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-kube-api-access-fbpv5\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.311925 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/60038211-87c8-4170-8fd0-35df8a16aa92-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.321671 4691
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:11:42 crc kubenswrapper[4691]: I1124 08:11:42.409606 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.176335 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.178364 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.181041 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-vchtz" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.181309 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.186615 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.186850 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.189882 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.248785 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.303492 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/be26bfeb-e0f8-4c67-8938-55d8399b717c-kolla-config\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.303583 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.303627 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be26bfeb-e0f8-4c67-8938-55d8399b717c-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.303664 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/be26bfeb-e0f8-4c67-8938-55d8399b717c-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.303686 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/be26bfeb-e0f8-4c67-8938-55d8399b717c-operator-scripts\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.303729 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/be26bfeb-e0f8-4c67-8938-55d8399b717c-config-data-default\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.303762 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/be26bfeb-e0f8-4c67-8938-55d8399b717c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.303799 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb9mh\" (UniqueName: \"kubernetes.io/projected/be26bfeb-e0f8-4c67-8938-55d8399b717c-kube-api-access-rb9mh\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.405638 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb9mh\" (UniqueName: \"kubernetes.io/projected/be26bfeb-e0f8-4c67-8938-55d8399b717c-kube-api-access-rb9mh\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.405744 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/be26bfeb-e0f8-4c67-8938-55d8399b717c-kolla-config\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.405804 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.405837 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be26bfeb-e0f8-4c67-8938-55d8399b717c-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.405865 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/be26bfeb-e0f8-4c67-8938-55d8399b717c-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.405891 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be26bfeb-e0f8-4c67-8938-55d8399b717c-operator-scripts\") pod \"openstack-galera-0\" (UID: 
\"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.405910 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/be26bfeb-e0f8-4c67-8938-55d8399b717c-config-data-default\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.405935 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/be26bfeb-e0f8-4c67-8938-55d8399b717c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.406001 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.406629 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/be26bfeb-e0f8-4c67-8938-55d8399b717c-kolla-config\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.406729 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/be26bfeb-e0f8-4c67-8938-55d8399b717c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.407557 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/be26bfeb-e0f8-4c67-8938-55d8399b717c-config-data-default\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.408029 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be26bfeb-e0f8-4c67-8938-55d8399b717c-operator-scripts\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.416226 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/be26bfeb-e0f8-4c67-8938-55d8399b717c-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.416329 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be26bfeb-e0f8-4c67-8938-55d8399b717c-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.436791 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.483097 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb9mh\" (UniqueName: \"kubernetes.io/projected/be26bfeb-e0f8-4c67-8938-55d8399b717c-kube-api-access-rb9mh\") pod \"openstack-galera-0\" (UID: \"be26bfeb-e0f8-4c67-8938-55d8399b717c\") " pod="openstack/openstack-galera-0" Nov 24 08:11:43 crc kubenswrapper[4691]: I1124 08:11:43.553345 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.522196 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.524360 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.527460 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.527447 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-jm8pb" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.527536 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.527513 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.536861 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.641989 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/5021ba85-77e5-4fc8-8816-5ad1587b82e5-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.642060 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5021ba85-77e5-4fc8-8816-5ad1587b82e5-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.642120 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/5021ba85-77e5-4fc8-8816-5ad1587b82e5-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.642143 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5021ba85-77e5-4fc8-8816-5ad1587b82e5-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " 
pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.642199 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5021ba85-77e5-4fc8-8816-5ad1587b82e5-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.642240 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.642300 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/5021ba85-77e5-4fc8-8816-5ad1587b82e5-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.642369 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zc26p\" (UniqueName: \"kubernetes.io/projected/5021ba85-77e5-4fc8-8816-5ad1587b82e5-kube-api-access-zc26p\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.743781 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/5021ba85-77e5-4fc8-8816-5ad1587b82e5-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.743841 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5021ba85-77e5-4fc8-8816-5ad1587b82e5-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.743882 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/5021ba85-77e5-4fc8-8816-5ad1587b82e5-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.743903 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5021ba85-77e5-4fc8-8816-5ad1587b82e5-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.743935 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5021ba85-77e5-4fc8-8816-5ad1587b82e5-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " 
pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.743955 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.743992 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/5021ba85-77e5-4fc8-8816-5ad1587b82e5-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.744049 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zc26p\" (UniqueName: \"kubernetes.io/projected/5021ba85-77e5-4fc8-8816-5ad1587b82e5-kube-api-access-zc26p\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.744609 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.744765 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5021ba85-77e5-4fc8-8816-5ad1587b82e5-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.745309 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/5021ba85-77e5-4fc8-8816-5ad1587b82e5-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.745433 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/5021ba85-77e5-4fc8-8816-5ad1587b82e5-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.746536 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5021ba85-77e5-4fc8-8816-5ad1587b82e5-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.753306 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/5021ba85-77e5-4fc8-8816-5ad1587b82e5-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.771339 4691 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5021ba85-77e5-4fc8-8816-5ad1587b82e5-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.777214 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zc26p\" (UniqueName: \"kubernetes.io/projected/5021ba85-77e5-4fc8-8816-5ad1587b82e5-kube-api-access-zc26p\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.783564 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"5021ba85-77e5-4fc8-8816-5ad1587b82e5\") " pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.839448 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.842279 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.845697 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-qxzq9" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.845780 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.845965 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.846213 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.852782 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.947296 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.947399 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bp28d\" (UniqueName: \"kubernetes.io/projected/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-kube-api-access-bp28d\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.947521 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-kolla-config\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.947547 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-config-data\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:44 crc kubenswrapper[4691]: I1124 08:11:44.947684 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:45 crc kubenswrapper[4691]: I1124 08:11:45.049124 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:45 crc kubenswrapper[4691]: I1124 08:11:45.049204 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bp28d\" (UniqueName: \"kubernetes.io/projected/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-kube-api-access-bp28d\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:45 crc kubenswrapper[4691]: I1124 08:11:45.049254 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-kolla-config\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:45 crc kubenswrapper[4691]: I1124 08:11:45.049284 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-config-data\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:45 crc 
kubenswrapper[4691]: I1124 08:11:45.049351 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:45 crc kubenswrapper[4691]: I1124 08:11:45.050793 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-kolla-config\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:45 crc kubenswrapper[4691]: I1124 08:11:45.051369 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-config-data\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:45 crc kubenswrapper[4691]: I1124 08:11:45.054629 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:45 crc kubenswrapper[4691]: I1124 08:11:45.062648 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:45 crc kubenswrapper[4691]: I1124 08:11:45.063288 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf" event={"ID":"282ca99b-ddc6-4450-8f8c-6a8e40144d35","Type":"ContainerStarted","Data":"b81080fd17f87f63e352e5b930b1060e343e573863bd93d58b38171a3ad86931"} Nov 24 08:11:45 crc kubenswrapper[4691]: I1124 08:11:45.085005 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bp28d\" (UniqueName: \"kubernetes.io/projected/cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4-kube-api-access-bp28d\") pod \"memcached-0\" (UID: \"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4\") " pod="openstack/memcached-0" Nov 24 08:11:45 crc kubenswrapper[4691]: I1124 08:11:45.179881 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 24 08:11:46 crc kubenswrapper[4691]: I1124 08:11:46.704102 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 08:11:46 crc kubenswrapper[4691]: I1124 08:11:46.705294 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 08:11:46 crc kubenswrapper[4691]: I1124 08:11:46.710138 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-phsm7" Nov 24 08:11:46 crc kubenswrapper[4691]: I1124 08:11:46.798711 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8sw2\" (UniqueName: \"kubernetes.io/projected/8c2f2c51-cf66-4a86-917b-52d20691e85b-kube-api-access-w8sw2\") pod \"kube-state-metrics-0\" (UID: \"8c2f2c51-cf66-4a86-917b-52d20691e85b\") " pod="openstack/kube-state-metrics-0" Nov 24 08:11:46 crc kubenswrapper[4691]: I1124 08:11:46.808251 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 08:11:46 crc kubenswrapper[4691]: I1124 08:11:46.901358 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8sw2\" (UniqueName: \"kubernetes.io/projected/8c2f2c51-cf66-4a86-917b-52d20691e85b-kube-api-access-w8sw2\") pod \"kube-state-metrics-0\" (UID: \"8c2f2c51-cf66-4a86-917b-52d20691e85b\") " pod="openstack/kube-state-metrics-0" Nov 24 08:11:46 crc kubenswrapper[4691]: I1124 08:11:46.925542 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8sw2\" (UniqueName: \"kubernetes.io/projected/8c2f2c51-cf66-4a86-917b-52d20691e85b-kube-api-access-w8sw2\") pod \"kube-state-metrics-0\" (UID: \"8c2f2c51-cf66-4a86-917b-52d20691e85b\") " pod="openstack/kube-state-metrics-0" Nov 24 08:11:47 crc kubenswrapper[4691]: I1124 08:11:47.025487 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.144809 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.715472 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.720120 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.723475 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.725316 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.725690 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-fjh2h" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.725920 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.726137 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.726340 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.850749 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.850819 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.850863 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.850890 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.850910 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s864\" (UniqueName: \"kubernetes.io/projected/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-kube-api-access-5s864\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.850934 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.850986 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.851130 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-config\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.952991 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.953052 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.953076 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.953095 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.953110 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s864\" (UniqueName: \"kubernetes.io/projected/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-kube-api-access-5s864\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.953129 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.953158 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.953195 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-config\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 
08:11:49.954148 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-config\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.955479 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.956018 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.957390 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.969892 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.972353 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.973024 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.975525 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s864\" (UniqueName: \"kubernetes.io/projected/7d275bbe-d927-40c6-83b6-ad6da7f2a83c-kube-api-access-5s864\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:49 crc kubenswrapper[4691]: I1124 08:11:49.982035 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"7d275bbe-d927-40c6-83b6-ad6da7f2a83c\") " pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.061989 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.701400 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-jknmq"] Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.703655 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.706516 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.706994 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-xf7f5" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.707017 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.716142 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-pkx2n"] Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.718946 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.720360 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jknmq"] Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.734848 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-pkx2n"] Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.784921 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/204a8833-cf7b-491a-b06a-0c983a6aa30a-var-log-ovn\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.784981 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/204a8833-cf7b-491a-b06a-0c983a6aa30a-combined-ca-bundle\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.785003 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-etc-ovs\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.785035 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/204a8833-cf7b-491a-b06a-0c983a6aa30a-var-run-ovn\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.785060 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-scripts\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 
08:11:50.785084 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgxhw\" (UniqueName: \"kubernetes.io/projected/204a8833-cf7b-491a-b06a-0c983a6aa30a-kube-api-access-jgxhw\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.785105 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/204a8833-cf7b-491a-b06a-0c983a6aa30a-ovn-controller-tls-certs\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.785124 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-var-run\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.785153 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-var-log\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.785169 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8b9z\" (UniqueName: \"kubernetes.io/projected/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-kube-api-access-f8b9z\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.785212 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/204a8833-cf7b-491a-b06a-0c983a6aa30a-scripts\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.785240 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/204a8833-cf7b-491a-b06a-0c983a6aa30a-var-run\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.785260 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-var-lib\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886376 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/204a8833-cf7b-491a-b06a-0c983a6aa30a-combined-ca-bundle\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886434 4691 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-etc-ovs\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886495 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/204a8833-cf7b-491a-b06a-0c983a6aa30a-var-run-ovn\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886538 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-scripts\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886568 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgxhw\" (UniqueName: \"kubernetes.io/projected/204a8833-cf7b-491a-b06a-0c983a6aa30a-kube-api-access-jgxhw\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886596 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/204a8833-cf7b-491a-b06a-0c983a6aa30a-ovn-controller-tls-certs\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886621 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-var-run\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886663 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-var-log\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886684 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8b9z\" (UniqueName: \"kubernetes.io/projected/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-kube-api-access-f8b9z\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886731 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/204a8833-cf7b-491a-b06a-0c983a6aa30a-scripts\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886758 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/204a8833-cf7b-491a-b06a-0c983a6aa30a-var-run\") pod 
\"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886783 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-var-lib\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.886818 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/204a8833-cf7b-491a-b06a-0c983a6aa30a-var-log-ovn\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.887241 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/204a8833-cf7b-491a-b06a-0c983a6aa30a-var-run-ovn\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.887315 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-var-log\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.887503 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-var-lib\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.887545 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/204a8833-cf7b-491a-b06a-0c983a6aa30a-var-run\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.887919 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-var-run\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.887331 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/204a8833-cf7b-491a-b06a-0c983a6aa30a-var-log-ovn\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.889866 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-scripts\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.889925 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: 
\"kubernetes.io/host-path/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-etc-ovs\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.890149 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/204a8833-cf7b-491a-b06a-0c983a6aa30a-scripts\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.892861 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/204a8833-cf7b-491a-b06a-0c983a6aa30a-ovn-controller-tls-certs\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.903719 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/204a8833-cf7b-491a-b06a-0c983a6aa30a-combined-ca-bundle\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.905223 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgxhw\" (UniqueName: \"kubernetes.io/projected/204a8833-cf7b-491a-b06a-0c983a6aa30a-kube-api-access-jgxhw\") pod \"ovn-controller-jknmq\" (UID: \"204a8833-cf7b-491a-b06a-0c983a6aa30a\") " pod="openstack/ovn-controller-jknmq" Nov 24 08:11:50 crc kubenswrapper[4691]: I1124 08:11:50.910275 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8b9z\" (UniqueName: \"kubernetes.io/projected/8f3c496c-e0d1-4b16-80e9-fd3c10dacf79-kube-api-access-f8b9z\") pod \"ovn-controller-ovs-pkx2n\" (UID: \"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79\") " pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:51 crc kubenswrapper[4691]: I1124 08:11:51.033168 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jknmq" Nov 24 08:11:51 crc kubenswrapper[4691]: I1124 08:11:51.045794 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.843941 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.848000 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.852081 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.852145 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.852620 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-ghjpl" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.853189 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.862398 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.948974 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4htql\" (UniqueName: \"kubernetes.io/projected/57b5f932-160d-453a-ad0b-2b111085fda8-kube-api-access-4htql\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.949060 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57b5f932-160d-453a-ad0b-2b111085fda8-config\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.949104 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.949319 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/57b5f932-160d-453a-ad0b-2b111085fda8-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.949507 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/57b5f932-160d-453a-ad0b-2b111085fda8-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.949590 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/57b5f932-160d-453a-ad0b-2b111085fda8-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.949669 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57b5f932-160d-453a-ad0b-2b111085fda8-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: 
\"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:53 crc kubenswrapper[4691]: I1124 08:11:53.949906 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/57b5f932-160d-453a-ad0b-2b111085fda8-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.051271 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.051369 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/57b5f932-160d-453a-ad0b-2b111085fda8-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.051418 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/57b5f932-160d-453a-ad0b-2b111085fda8-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.051463 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/57b5f932-160d-453a-ad0b-2b111085fda8-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.051491 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57b5f932-160d-453a-ad0b-2b111085fda8-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.051545 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/57b5f932-160d-453a-ad0b-2b111085fda8-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.051574 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4htql\" (UniqueName: \"kubernetes.io/projected/57b5f932-160d-453a-ad0b-2b111085fda8-kube-api-access-4htql\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.051602 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57b5f932-160d-453a-ad0b-2b111085fda8-config\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.051875 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.052200 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/57b5f932-160d-453a-ad0b-2b111085fda8-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.052694 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/57b5f932-160d-453a-ad0b-2b111085fda8-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.052852 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57b5f932-160d-453a-ad0b-2b111085fda8-config\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.058374 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57b5f932-160d-453a-ad0b-2b111085fda8-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.059640 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/57b5f932-160d-453a-ad0b-2b111085fda8-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.065507 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/57b5f932-160d-453a-ad0b-2b111085fda8-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.070662 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4htql\" (UniqueName: \"kubernetes.io/projected/57b5f932-160d-453a-ad0b-2b111085fda8-kube-api-access-4htql\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.072221 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"57b5f932-160d-453a-ad0b-2b111085fda8\") " pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.138565 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"60038211-87c8-4170-8fd0-35df8a16aa92","Type":"ContainerStarted","Data":"3ff9ab7db9deb0e0331e70d165b6281d4565ece69c5f21954e24f4dec35406b2"} Nov 24 08:11:54 crc kubenswrapper[4691]: I1124 08:11:54.188015 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 24 08:11:55 crc kubenswrapper[4691]: I1124 08:11:55.831350 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 24 08:11:55 crc kubenswrapper[4691]: W1124 08:11:55.835910 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod224d72d8_5d0a_48df_8930_2cb28fc1fd93.slice/crio-9170c4b9b023350c74d9e3bb8519cf8438cf56de5a0e0183f2c87210001de793 WatchSource:0}: Error finding container 9170c4b9b023350c74d9e3bb8519cf8438cf56de5a0e0183f2c87210001de793: Status 404 returned error can't find the container with id 9170c4b9b023350c74d9e3bb8519cf8438cf56de5a0e0183f2c87210001de793 Nov 24 08:11:55 crc kubenswrapper[4691]: I1124 08:11:55.840152 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.023631 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.030098 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 24 08:11:56 crc kubenswrapper[4691]: W1124 08:11:56.034291 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb7ce1da_e87a_4d10_b6ad_f9f2e0d022b4.slice/crio-2dee08ebc0f93697a5efcb4a8d1dfd7c26a0764d10e8012b2442d0e2e19eabb0 WatchSource:0}: Error finding container 2dee08ebc0f93697a5efcb4a8d1dfd7c26a0764d10e8012b2442d0e2e19eabb0: Status 404 returned error can't find the container with id 2dee08ebc0f93697a5efcb4a8d1dfd7c26a0764d10e8012b2442d0e2e19eabb0 Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.140240 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 24 08:11:56 crc kubenswrapper[4691]: W1124 08:11:56.147404 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57b5f932_160d_453a_ad0b_2b111085fda8.slice/crio-82f1b2fd412e9022506975de258dd168c44f2783a77fc052b8f4c8d1e4eb96da WatchSource:0}: Error finding container 82f1b2fd412e9022506975de258dd168c44f2783a77fc052b8f4c8d1e4eb96da: Status 404 returned error can't find the container with id 82f1b2fd412e9022506975de258dd168c44f2783a77fc052b8f4c8d1e4eb96da Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.216446 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jknmq"] Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.222880 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.228586 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"224d72d8-5d0a-48df-8930-2cb28fc1fd93","Type":"ContainerStarted","Data":"9170c4b9b023350c74d9e3bb8519cf8438cf56de5a0e0183f2c87210001de793"} Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.230940 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4","Type":"ContainerStarted","Data":"2dee08ebc0f93697a5efcb4a8d1dfd7c26a0764d10e8012b2442d0e2e19eabb0"} Nov 24 08:11:56 crc kubenswrapper[4691]: W1124 08:11:56.231559 4691 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod204a8833_cf7b_491a_b06a_0c983a6aa30a.slice/crio-0104c235993426990b571ce122005b73ff603067a0404d45ad2e758c56041868 WatchSource:0}: Error finding container 0104c235993426990b571ce122005b73ff603067a0404d45ad2e758c56041868: Status 404 returned error can't find the container with id 0104c235993426990b571ce122005b73ff603067a0404d45ad2e758c56041868 Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.233818 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5021ba85-77e5-4fc8-8816-5ad1587b82e5","Type":"ContainerStarted","Data":"fc8eec502c022566cd218aef14e89c7c39d1ba03ab22724cefd812b11cb4c5a9"} Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.239026 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"57b5f932-160d-453a-ad0b-2b111085fda8","Type":"ContainerStarted","Data":"82f1b2fd412e9022506975de258dd168c44f2783a77fc052b8f4c8d1e4eb96da"} Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.244054 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"be26bfeb-e0f8-4c67-8938-55d8399b717c","Type":"ContainerStarted","Data":"af7f427c602a6c50b9e86e0c35f5734e733bdeebabb5691b8ec68777b5d1a759"} Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.324361 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 24 08:11:56 crc kubenswrapper[4691]: I1124 08:11:56.398426 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-pkx2n"] Nov 24 08:11:56 crc kubenswrapper[4691]: E1124 08:11:56.574355 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 24 08:11:56 crc kubenswrapper[4691]: E1124 08:11:56.574606 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9wbcs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-5x2mn_openstack(40aec6d7-a3f1-461a-988b-8f87be6ad1a9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 08:11:56 crc kubenswrapper[4691]: E1124 08:11:56.575866 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn" podUID="40aec6d7-a3f1-461a-988b-8f87be6ad1a9" Nov 24 08:11:56 crc kubenswrapper[4691]: E1124 08:11:56.902998 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 24 08:11:56 crc kubenswrapper[4691]: E1124 08:11:56.903901 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x28rc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-7pjkz_openstack(2ed705ca-195e-4f31-a76d-529ec10e4e2f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 08:11:56 crc kubenswrapper[4691]: E1124 08:11:56.905116 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz" podUID="2ed705ca-195e-4f31-a76d-529ec10e4e2f" Nov 24 08:11:57 crc kubenswrapper[4691]: I1124 08:11:57.257062 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jknmq" event={"ID":"204a8833-cf7b-491a-b06a-0c983a6aa30a","Type":"ContainerStarted","Data":"0104c235993426990b571ce122005b73ff603067a0404d45ad2e758c56041868"} Nov 24 08:11:57 crc kubenswrapper[4691]: I1124 08:11:57.259180 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"7d275bbe-d927-40c6-83b6-ad6da7f2a83c","Type":"ContainerStarted","Data":"b7300f22ab453ecb7b81e5b202888037fbe91748cec76e9703f8e961cdfa9f5d"} Nov 24 08:11:57 crc kubenswrapper[4691]: I1124 08:11:57.260988 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"8c2f2c51-cf66-4a86-917b-52d20691e85b","Type":"ContainerStarted","Data":"b030a77a4051ef6ca1f6bcdb96ef5d6b7db13a61a886cac12e2809a181caacea"} Nov 24 08:11:57 crc kubenswrapper[4691]: I1124 08:11:57.263629 4691 generic.go:334] "Generic (PLEG): container finished" podID="1759554c-7f60-492d-b11b-8cb45b0cc5be" containerID="81353da05a9c338bfab618f3ae658041a07ae96f83fde4c45e9a98d612a2ee72" exitCode=0 Nov 24 08:11:57 crc kubenswrapper[4691]: I1124 08:11:57.263702 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-666b6646f7-dv56l" event={"ID":"1759554c-7f60-492d-b11b-8cb45b0cc5be","Type":"ContainerDied","Data":"81353da05a9c338bfab618f3ae658041a07ae96f83fde4c45e9a98d612a2ee72"} Nov 24 08:11:57 crc kubenswrapper[4691]: I1124 08:11:57.269028 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf" event={"ID":"282ca99b-ddc6-4450-8f8c-6a8e40144d35","Type":"ContainerDied","Data":"d89b820dc7e991a20d9a7b26e4cbda21da3e0b59b6c664adab6c93a68886e173"} Nov 24 08:11:57 crc kubenswrapper[4691]: I1124 08:11:57.268850 4691 generic.go:334] "Generic (PLEG): container finished" podID="282ca99b-ddc6-4450-8f8c-6a8e40144d35" containerID="d89b820dc7e991a20d9a7b26e4cbda21da3e0b59b6c664adab6c93a68886e173" exitCode=0 Nov 24 08:11:57 crc kubenswrapper[4691]: I1124 08:11:57.274036 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pkx2n" event={"ID":"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79","Type":"ContainerStarted","Data":"159b4b8d496e08161cd1ceac17302869e312768c349858d4282566c20f0f63bf"} Nov 24 08:11:58 crc kubenswrapper[4691]: I1124 08:11:58.292189 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-dv56l" event={"ID":"1759554c-7f60-492d-b11b-8cb45b0cc5be","Type":"ContainerStarted","Data":"0383e28b2d033834a11e0d695028ae284e39a2479366eb159d27b7e4535a2ed8"} Nov 24 08:11:58 crc kubenswrapper[4691]: I1124 08:11:58.293134 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-dv56l" Nov 24 08:11:58 crc kubenswrapper[4691]: I1124 08:11:58.316322 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-dv56l" podStartSLOduration=3.066947041 podStartE2EDuration="18.316297345s" podCreationTimestamp="2025-11-24 08:11:40 +0000 UTC" firstStartedPulling="2025-11-24 08:11:41.526309959 +0000 UTC m=+863.525259198" lastFinishedPulling="2025-11-24 08:11:56.775660253 +0000 UTC m=+878.774609502" observedRunningTime="2025-11-24 08:11:58.312804664 +0000 UTC m=+880.311753913" watchObservedRunningTime="2025-11-24 08:11:58.316297345 +0000 UTC m=+880.315246594" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.125928 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.273486 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-config\") pod \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.273600 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-dns-svc\") pod \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.273668 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x28rc\" (UniqueName: \"kubernetes.io/projected/2ed705ca-195e-4f31-a76d-529ec10e4e2f-kube-api-access-x28rc\") pod \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\" (UID: \"2ed705ca-195e-4f31-a76d-529ec10e4e2f\") " Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.274668 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2ed705ca-195e-4f31-a76d-529ec10e4e2f" (UID: "2ed705ca-195e-4f31-a76d-529ec10e4e2f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.274797 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-config" (OuterVolumeSpecName: "config") pod "2ed705ca-195e-4f31-a76d-529ec10e4e2f" (UID: "2ed705ca-195e-4f31-a76d-529ec10e4e2f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.281561 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ed705ca-195e-4f31-a76d-529ec10e4e2f-kube-api-access-x28rc" (OuterVolumeSpecName: "kube-api-access-x28rc") pod "2ed705ca-195e-4f31-a76d-529ec10e4e2f" (UID: "2ed705ca-195e-4f31-a76d-529ec10e4e2f"). InnerVolumeSpecName "kube-api-access-x28rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.303074 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.303087 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-7pjkz" event={"ID":"2ed705ca-195e-4f31-a76d-529ec10e4e2f","Type":"ContainerDied","Data":"2c2ce8038b5c113f203a4930c8950d6d1c2e31fffbd42a3f586af487ae7d8503"} Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.368094 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7pjkz"] Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.376118 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.376144 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x28rc\" (UniqueName: \"kubernetes.io/projected/2ed705ca-195e-4f31-a76d-529ec10e4e2f-kube-api-access-x28rc\") on node \"crc\" DevicePath \"\"" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.376156 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ed705ca-195e-4f31-a76d-529ec10e4e2f-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.376232 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-7pjkz"] Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.518923 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.680752 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-config\") pod \"40aec6d7-a3f1-461a-988b-8f87be6ad1a9\" (UID: \"40aec6d7-a3f1-461a-988b-8f87be6ad1a9\") " Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.681320 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-config" (OuterVolumeSpecName: "config") pod "40aec6d7-a3f1-461a-988b-8f87be6ad1a9" (UID: "40aec6d7-a3f1-461a-988b-8f87be6ad1a9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.681536 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wbcs\" (UniqueName: \"kubernetes.io/projected/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-kube-api-access-9wbcs\") pod \"40aec6d7-a3f1-461a-988b-8f87be6ad1a9\" (UID: \"40aec6d7-a3f1-461a-988b-8f87be6ad1a9\") " Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.683210 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.685376 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-kube-api-access-9wbcs" (OuterVolumeSpecName: "kube-api-access-9wbcs") pod "40aec6d7-a3f1-461a-988b-8f87be6ad1a9" (UID: "40aec6d7-a3f1-461a-988b-8f87be6ad1a9"). InnerVolumeSpecName "kube-api-access-9wbcs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:11:59 crc kubenswrapper[4691]: I1124 08:11:59.785362 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wbcs\" (UniqueName: \"kubernetes.io/projected/40aec6d7-a3f1-461a-988b-8f87be6ad1a9-kube-api-access-9wbcs\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:00 crc kubenswrapper[4691]: I1124 08:12:00.314605 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn" event={"ID":"40aec6d7-a3f1-461a-988b-8f87be6ad1a9","Type":"ContainerDied","Data":"049925ae025ec8003daad907304673e9c5851762fb0a2a3df4742afc862edf8f"} Nov 24 08:12:00 crc kubenswrapper[4691]: I1124 08:12:00.314686 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5x2mn" Nov 24 08:12:00 crc kubenswrapper[4691]: I1124 08:12:00.377571 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5x2mn"] Nov 24 08:12:00 crc kubenswrapper[4691]: I1124 08:12:00.384747 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5x2mn"] Nov 24 08:12:00 crc kubenswrapper[4691]: I1124 08:12:00.769340 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ed705ca-195e-4f31-a76d-529ec10e4e2f" path="/var/lib/kubelet/pods/2ed705ca-195e-4f31-a76d-529ec10e4e2f/volumes" Nov 24 08:12:00 crc kubenswrapper[4691]: I1124 08:12:00.769846 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40aec6d7-a3f1-461a-988b-8f87be6ad1a9" path="/var/lib/kubelet/pods/40aec6d7-a3f1-461a-988b-8f87be6ad1a9/volumes" Nov 24 08:12:05 crc kubenswrapper[4691]: I1124 08:12:05.910751 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-666b6646f7-dv56l" Nov 24 08:12:06 crc kubenswrapper[4691]: I1124 08:12:06.364337 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf" event={"ID":"282ca99b-ddc6-4450-8f8c-6a8e40144d35","Type":"ContainerStarted","Data":"16dbcab89fb2cac2d066a474a4404a9591de7b02220ea4458a8485d8d875d763"} Nov 24 08:12:06 crc kubenswrapper[4691]: I1124 08:12:06.364722 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf" Nov 24 08:12:06 crc kubenswrapper[4691]: I1124 08:12:06.387875 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf" podStartSLOduration=14.63203991 podStartE2EDuration="26.387858022s" podCreationTimestamp="2025-11-24 08:11:40 +0000 UTC" firstStartedPulling="2025-11-24 08:11:44.920501827 +0000 UTC m=+866.919451076" lastFinishedPulling="2025-11-24 08:11:56.676319939 +0000 UTC m=+878.675269188" observedRunningTime="2025-11-24 08:12:06.386121411 +0000 UTC m=+888.385070660" watchObservedRunningTime="2025-11-24 08:12:06.387858022 +0000 UTC m=+888.386807271" Nov 24 08:12:07 crc kubenswrapper[4691]: I1124 08:12:07.392962 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.380160 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"224d72d8-5d0a-48df-8930-2cb28fc1fd93","Type":"ContainerStarted","Data":"b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283"} Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.382717 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/rabbitmq-cell1-server-0" event={"ID":"60038211-87c8-4170-8fd0-35df8a16aa92","Type":"ContainerStarted","Data":"24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785"} Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.386051 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5021ba85-77e5-4fc8-8816-5ad1587b82e5","Type":"ContainerStarted","Data":"ec01f58affbe1eea667e7af5646babe5aa87775b6d93c54fa388578b6e802dc5"} Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.389411 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pkx2n" event={"ID":"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79","Type":"ContainerStarted","Data":"12fe0ba344316b287dceb91e669b0d2b7e5480a2657bfaf1cf1d3c09185916ff"} Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.393798 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jknmq" event={"ID":"204a8833-cf7b-491a-b06a-0c983a6aa30a","Type":"ContainerStarted","Data":"8f550cfb71cf2acddabb68a3e6e3fd384c38d39fd0c7a61563915d1458b02c45"} Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.393972 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-jknmq" Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.396354 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4","Type":"ContainerStarted","Data":"7954fa644555dec50b859561989324a6c86ee67a617c42f523cc9af1bc664121"} Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.396790 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.402049 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"7d275bbe-d927-40c6-83b6-ad6da7f2a83c","Type":"ContainerStarted","Data":"1a7fd66ff601c79637291af8dfd1fececa6a6e43e8d1a4b85618b23f069f35f8"} Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.406892 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"57b5f932-160d-453a-ad0b-2b111085fda8","Type":"ContainerStarted","Data":"9bb17f6fe38e8c0032c7c8618d68e59417b7f87c753ae4b05877a8b2c080f01e"} Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.408697 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"be26bfeb-e0f8-4c67-8938-55d8399b717c","Type":"ContainerStarted","Data":"f4c1d46e906360401f57b95b1f644d7cfa1f723797115726c746e7a0ca7e3813"} Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.411404 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"8c2f2c51-cf66-4a86-917b-52d20691e85b","Type":"ContainerStarted","Data":"572a4e99a55305c32a880ded67e24134685c77bf7f3fdd944a003e601c95da3f"} Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.413127 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.547941 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=15.176537572 podStartE2EDuration="24.547905681s" podCreationTimestamp="2025-11-24 08:11:44 +0000 UTC" firstStartedPulling="2025-11-24 08:11:56.037724756 +0000 UTC m=+878.036674005" lastFinishedPulling="2025-11-24 08:12:05.409092865 
+0000 UTC m=+887.408042114" observedRunningTime="2025-11-24 08:12:08.547039176 +0000 UTC m=+890.545988435" watchObservedRunningTime="2025-11-24 08:12:08.547905681 +0000 UTC m=+890.546854930" Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.573658 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=12.105404134 podStartE2EDuration="22.573641928s" podCreationTimestamp="2025-11-24 08:11:46 +0000 UTC" firstStartedPulling="2025-11-24 08:11:56.238110841 +0000 UTC m=+878.237060110" lastFinishedPulling="2025-11-24 08:12:06.706348655 +0000 UTC m=+888.705297904" observedRunningTime="2025-11-24 08:12:08.57095222 +0000 UTC m=+890.569901469" watchObservedRunningTime="2025-11-24 08:12:08.573641928 +0000 UTC m=+890.572591167" Nov 24 08:12:08 crc kubenswrapper[4691]: I1124 08:12:08.599078 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-jknmq" podStartSLOduration=8.823190216 podStartE2EDuration="18.599046156s" podCreationTimestamp="2025-11-24 08:11:50 +0000 UTC" firstStartedPulling="2025-11-24 08:11:56.237423541 +0000 UTC m=+878.236372810" lastFinishedPulling="2025-11-24 08:12:06.013279501 +0000 UTC m=+888.012228750" observedRunningTime="2025-11-24 08:12:08.588269913 +0000 UTC m=+890.587219162" watchObservedRunningTime="2025-11-24 08:12:08.599046156 +0000 UTC m=+890.597995405" Nov 24 08:12:09 crc kubenswrapper[4691]: I1124 08:12:09.446888 4691 generic.go:334] "Generic (PLEG): container finished" podID="8f3c496c-e0d1-4b16-80e9-fd3c10dacf79" containerID="12fe0ba344316b287dceb91e669b0d2b7e5480a2657bfaf1cf1d3c09185916ff" exitCode=0 Nov 24 08:12:09 crc kubenswrapper[4691]: I1124 08:12:09.448817 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pkx2n" event={"ID":"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79","Type":"ContainerDied","Data":"12fe0ba344316b287dceb91e669b0d2b7e5480a2657bfaf1cf1d3c09185916ff"} Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.278740 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf" Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.336492 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-dv56l"] Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.337038 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-dv56l" podUID="1759554c-7f60-492d-b11b-8cb45b0cc5be" containerName="dnsmasq-dns" containerID="cri-o://0383e28b2d033834a11e0d695028ae284e39a2479366eb159d27b7e4535a2ed8" gracePeriod=10 Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.471919 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"57b5f932-160d-453a-ad0b-2b111085fda8","Type":"ContainerStarted","Data":"5af019f9fc47fe620bd602daf8da0be41344415003c97d56ced88594547a74c3"} Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.474813 4691 generic.go:334] "Generic (PLEG): container finished" podID="be26bfeb-e0f8-4c67-8938-55d8399b717c" containerID="f4c1d46e906360401f57b95b1f644d7cfa1f723797115726c746e7a0ca7e3813" exitCode=0 Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.475055 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"be26bfeb-e0f8-4c67-8938-55d8399b717c","Type":"ContainerDied","Data":"f4c1d46e906360401f57b95b1f644d7cfa1f723797115726c746e7a0ca7e3813"} Nov 
24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.482514 4691 generic.go:334] "Generic (PLEG): container finished" podID="1759554c-7f60-492d-b11b-8cb45b0cc5be" containerID="0383e28b2d033834a11e0d695028ae284e39a2479366eb159d27b7e4535a2ed8" exitCode=0 Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.482588 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-dv56l" event={"ID":"1759554c-7f60-492d-b11b-8cb45b0cc5be","Type":"ContainerDied","Data":"0383e28b2d033834a11e0d695028ae284e39a2479366eb159d27b7e4535a2ed8"} Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.492248 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pkx2n" event={"ID":"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79","Type":"ContainerStarted","Data":"befcbfae9105f143ae54a04cbb0cc93764244453f3deee9cdf341dc649e85153"} Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.492317 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pkx2n" event={"ID":"8f3c496c-e0d1-4b16-80e9-fd3c10dacf79","Type":"ContainerStarted","Data":"892aa0cc3a33f0b555e93bd0dbe5add8364f99f640db10590db3070404bee028"} Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.492350 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.492517 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.508237 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=5.150841161 podStartE2EDuration="19.508154626s" podCreationTimestamp="2025-11-24 08:11:52 +0000 UTC" firstStartedPulling="2025-11-24 08:11:56.149653574 +0000 UTC m=+878.148602823" lastFinishedPulling="2025-11-24 08:12:10.506967039 +0000 UTC m=+892.505916288" observedRunningTime="2025-11-24 08:12:11.491226945 +0000 UTC m=+893.490176194" watchObservedRunningTime="2025-11-24 08:12:11.508154626 +0000 UTC m=+893.507103875" Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.533184 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"7d275bbe-d927-40c6-83b6-ad6da7f2a83c","Type":"ContainerStarted","Data":"124190f4b13771c1d1800f1bb5c9ec5f8736f901d0ca7df22c9364643183d417"} Nov 24 08:12:11 crc kubenswrapper[4691]: E1124 08:12:11.536125 4691 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1759554c_7f60_492d_b11b_8cb45b0cc5be.slice/crio-conmon-0383e28b2d033834a11e0d695028ae284e39a2479366eb159d27b7e4535a2ed8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1759554c_7f60_492d_b11b_8cb45b0cc5be.slice/crio-0383e28b2d033834a11e0d695028ae284e39a2479366eb159d27b7e4535a2ed8.scope\": RecentStats: unable to find data in memory cache]" Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.561863 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-pkx2n" podStartSLOduration=12.246705236 podStartE2EDuration="21.561834614s" podCreationTimestamp="2025-11-24 08:11:50 +0000 UTC" firstStartedPulling="2025-11-24 08:11:56.415714016 +0000 UTC m=+878.414663265" lastFinishedPulling="2025-11-24 08:12:05.730843394 +0000 UTC 
m=+887.729792643" observedRunningTime="2025-11-24 08:12:11.545405957 +0000 UTC m=+893.544355206" watchObservedRunningTime="2025-11-24 08:12:11.561834614 +0000 UTC m=+893.560783863"
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.572213 4691 generic.go:334] "Generic (PLEG): container finished" podID="5021ba85-77e5-4fc8-8816-5ad1587b82e5" containerID="ec01f58affbe1eea667e7af5646babe5aa87775b6d93c54fa388578b6e802dc5" exitCode=0
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.572289 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5021ba85-77e5-4fc8-8816-5ad1587b82e5","Type":"ContainerDied","Data":"ec01f58affbe1eea667e7af5646babe5aa87775b6d93c54fa388578b6e802dc5"}
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.579940 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=9.378005694 podStartE2EDuration="23.579923219s" podCreationTimestamp="2025-11-24 08:11:48 +0000 UTC" firstStartedPulling="2025-11-24 08:11:56.312699336 +0000 UTC m=+878.311648585" lastFinishedPulling="2025-11-24 08:12:10.514616851 +0000 UTC m=+892.513566110" observedRunningTime="2025-11-24 08:12:11.572613877 +0000 UTC m=+893.571563126" watchObservedRunningTime="2025-11-24 08:12:11.579923219 +0000 UTC m=+893.578872478"
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.841402 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.893927 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ck2s5\" (UniqueName: \"kubernetes.io/projected/1759554c-7f60-492d-b11b-8cb45b0cc5be-kube-api-access-ck2s5\") pod \"1759554c-7f60-492d-b11b-8cb45b0cc5be\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") "
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.894250 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-dns-svc\") pod \"1759554c-7f60-492d-b11b-8cb45b0cc5be\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") "
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.894292 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-config\") pod \"1759554c-7f60-492d-b11b-8cb45b0cc5be\" (UID: \"1759554c-7f60-492d-b11b-8cb45b0cc5be\") "
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.899119 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1759554c-7f60-492d-b11b-8cb45b0cc5be-kube-api-access-ck2s5" (OuterVolumeSpecName: "kube-api-access-ck2s5") pod "1759554c-7f60-492d-b11b-8cb45b0cc5be" (UID: "1759554c-7f60-492d-b11b-8cb45b0cc5be"). InnerVolumeSpecName "kube-api-access-ck2s5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.930606 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-config" (OuterVolumeSpecName: "config") pod "1759554c-7f60-492d-b11b-8cb45b0cc5be" (UID: "1759554c-7f60-492d-b11b-8cb45b0cc5be"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.946722 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1759554c-7f60-492d-b11b-8cb45b0cc5be" (UID: "1759554c-7f60-492d-b11b-8cb45b0cc5be"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.996932 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.996991 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1759554c-7f60-492d-b11b-8cb45b0cc5be-config\") on node \"crc\" DevicePath \"\""
Nov 24 08:12:11 crc kubenswrapper[4691]: I1124 08:12:11.997001 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ck2s5\" (UniqueName: \"kubernetes.io/projected/1759554c-7f60-492d-b11b-8cb45b0cc5be-kube-api-access-ck2s5\") on node \"crc\" DevicePath \"\""
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.188415 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.226035 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.584719 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-dv56l"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.584733 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-dv56l" event={"ID":"1759554c-7f60-492d-b11b-8cb45b0cc5be","Type":"ContainerDied","Data":"1375709cda165f61c7695105a18250134e999db7d2885999dd37234941727d35"}
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.585281 4691 scope.go:117] "RemoveContainer" containerID="0383e28b2d033834a11e0d695028ae284e39a2479366eb159d27b7e4535a2ed8"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.587810 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5021ba85-77e5-4fc8-8816-5ad1587b82e5","Type":"ContainerStarted","Data":"e516b4a117ca2401e396b22b2c4aaae441d65d1d1e91b6a5a859aaf5a2cff54b"}
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.591076 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"be26bfeb-e0f8-4c67-8938-55d8399b717c","Type":"ContainerStarted","Data":"a3ab02b67f841713904a3690dcf2a523f55ace752a91e6c1746c375732cb0c7f"}
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.591359 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.615969 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=19.27995195 podStartE2EDuration="29.615935416s" podCreationTimestamp="2025-11-24 08:11:43 +0000 UTC" firstStartedPulling="2025-11-24 08:11:55.835242779 +0000 UTC m=+877.834192028" lastFinishedPulling="2025-11-24 08:12:06.171226245 +0000 UTC m=+888.170175494" observedRunningTime="2025-11-24 08:12:12.613922177 +0000 UTC m=+894.612871446" watchObservedRunningTime="2025-11-24 08:12:12.615935416 +0000 UTC m=+894.614884665"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.616631 4691 scope.go:117] "RemoveContainer" containerID="81353da05a9c338bfab618f3ae658041a07ae96f83fde4c45e9a98d612a2ee72"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.635421 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-dv56l"]
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.642463 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-dv56l"]
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.651173 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.666183 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=20.874907637 podStartE2EDuration="30.666155933s" podCreationTimestamp="2025-11-24 08:11:42 +0000 UTC" firstStartedPulling="2025-11-24 08:11:56.043711519 +0000 UTC m=+878.042660768" lastFinishedPulling="2025-11-24 08:12:05.834959815 +0000 UTC m=+887.833909064" observedRunningTime="2025-11-24 08:12:12.659030486 +0000 UTC m=+894.657979745" watchObservedRunningTime="2025-11-24 08:12:12.666155933 +0000 UTC m=+894.665105182"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.770423 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1759554c-7f60-492d-b11b-8cb45b0cc5be" path="/var/lib/kubelet/pods/1759554c-7f60-492d-b11b-8cb45b0cc5be/volumes"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.970606 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-kj7mb"]
Nov 24 08:12:12 crc kubenswrapper[4691]: E1124 08:12:12.971143 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1759554c-7f60-492d-b11b-8cb45b0cc5be" containerName="dnsmasq-dns"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.971208 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="1759554c-7f60-492d-b11b-8cb45b0cc5be" containerName="dnsmasq-dns"
Nov 24 08:12:12 crc kubenswrapper[4691]: E1124 08:12:12.971293 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1759554c-7f60-492d-b11b-8cb45b0cc5be" containerName="init"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.971347 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="1759554c-7f60-492d-b11b-8cb45b0cc5be" containerName="init"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.971562 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="1759554c-7f60-492d-b11b-8cb45b0cc5be" containerName="dnsmasq-dns"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.972427 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.975475 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 24 08:12:12 crc kubenswrapper[4691]: I1124 08:12:12.988911 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-kj7mb"]
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.031146 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-hs48v"]
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.032152 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.034397 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.045662 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-hs48v"]
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.118292 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-config\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.118581 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56vqf\" (UniqueName: \"kubernetes.io/projected/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-kube-api-access-56vqf\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.118751 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.118829 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.220784 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-config\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.220839 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56vqf\" (UniqueName: \"kubernetes.io/projected/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-kube-api-access-56vqf\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.220879 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/16f9ca32-c0b3-4269-af05-a68a6d21269b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.220918 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/16f9ca32-c0b3-4269-af05-a68a6d21269b-ovn-rundir\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.220945 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.220963 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.221045 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fsj8\" (UniqueName: \"kubernetes.io/projected/16f9ca32-c0b3-4269-af05-a68a6d21269b-kube-api-access-6fsj8\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.221104 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/16f9ca32-c0b3-4269-af05-a68a6d21269b-ovs-rundir\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.221224 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16f9ca32-c0b3-4269-af05-a68a6d21269b-config\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.221367 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16f9ca32-c0b3-4269-af05-a68a6d21269b-combined-ca-bundle\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.221863 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.222489 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.222563 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-config\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.239531 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56vqf\" (UniqueName: \"kubernetes.io/projected/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-kube-api-access-56vqf\") pod \"dnsmasq-dns-6bc7876d45-kj7mb\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") " pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.288411 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.330303 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/16f9ca32-c0b3-4269-af05-a68a6d21269b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.330376 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/16f9ca32-c0b3-4269-af05-a68a6d21269b-ovn-rundir\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.330459 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fsj8\" (UniqueName: \"kubernetes.io/projected/16f9ca32-c0b3-4269-af05-a68a6d21269b-kube-api-access-6fsj8\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.330480 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/16f9ca32-c0b3-4269-af05-a68a6d21269b-ovs-rundir\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.330527 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16f9ca32-c0b3-4269-af05-a68a6d21269b-config\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.330552 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16f9ca32-c0b3-4269-af05-a68a6d21269b-combined-ca-bundle\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.331700 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/16f9ca32-c0b3-4269-af05-a68a6d21269b-ovs-rundir\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.331916 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/16f9ca32-c0b3-4269-af05-a68a6d21269b-ovn-rundir\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.336372 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/16f9ca32-c0b3-4269-af05-a68a6d21269b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.337621 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16f9ca32-c0b3-4269-af05-a68a6d21269b-combined-ca-bundle\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.345832 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16f9ca32-c0b3-4269-af05-a68a6d21269b-config\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.360235 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fsj8\" (UniqueName: \"kubernetes.io/projected/16f9ca32-c0b3-4269-af05-a68a6d21269b-kube-api-access-6fsj8\") pod \"ovn-controller-metrics-hs48v\" (UID: \"16f9ca32-c0b3-4269-af05-a68a6d21269b\") " pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.412891 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-kj7mb"]
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.437652 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-s58t5"]
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.440196 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.452701 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.462998 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-s58t5"]
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.534259 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.534353 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nv5tf\" (UniqueName: \"kubernetes.io/projected/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-kube-api-access-nv5tf\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.534422 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.534614 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-config\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.534657 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-dns-svc\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.555724 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.555773 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.636758 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-dns-svc\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.636832 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.636876 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nv5tf\" (UniqueName: \"kubernetes.io/projected/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-kube-api-access-nv5tf\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.636948 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.637026 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-config\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.638384 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-dns-svc\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.638384 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.638695 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-config\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.639320 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.648820 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-hs48v"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.655738 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nv5tf\" (UniqueName: \"kubernetes.io/projected/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-kube-api-access-nv5tf\") pod \"dnsmasq-dns-8554648995-s58t5\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.808043 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:13 crc kubenswrapper[4691]: I1124 08:12:13.840817 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-kj7mb"]
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.071925 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.110283 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-hs48v"]
Nov 24 08:12:14 crc kubenswrapper[4691]: W1124 08:12:14.122803 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16f9ca32_c0b3_4269_af05_a68a6d21269b.slice/crio-523bb2c619304adfe38f3e793184d46f7a420c1e26d074d1c65e4f2b98246a63 WatchSource:0}: Error finding container 523bb2c619304adfe38f3e793184d46f7a420c1e26d074d1c65e4f2b98246a63: Status 404 returned error can't find the container with id 523bb2c619304adfe38f3e793184d46f7a420c1e26d074d1c65e4f2b98246a63
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.125659 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.267228 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-s58t5"]
Nov 24 08:12:14 crc kubenswrapper[4691]: W1124 08:12:14.274717 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd2c2eb8_8ce7_4973_98c2_b9d7a86969a9.slice/crio-f5a826496bb18682c6c3959835285515529b80f7c9b7e8a75ce3124e89460eef WatchSource:0}: Error finding container f5a826496bb18682c6c3959835285515529b80f7c9b7e8a75ce3124e89460eef: Status 404 returned error can't find the container with id f5a826496bb18682c6c3959835285515529b80f7c9b7e8a75ce3124e89460eef
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.613590 4691 generic.go:334] "Generic (PLEG): container finished" podID="fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" containerID="d9012fefe11d0496f43c751a566d038aeec2dec5c960a140f2c518dbb028f5fb" exitCode=0
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.613697 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-s58t5" event={"ID":"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9","Type":"ContainerDied","Data":"d9012fefe11d0496f43c751a566d038aeec2dec5c960a140f2c518dbb028f5fb"}
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.613988 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-s58t5" event={"ID":"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9","Type":"ContainerStarted","Data":"f5a826496bb18682c6c3959835285515529b80f7c9b7e8a75ce3124e89460eef"}
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.615918 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-hs48v" event={"ID":"16f9ca32-c0b3-4269-af05-a68a6d21269b","Type":"ContainerStarted","Data":"5893f9541d49138f0bae89046c75ad5090d0b354d8e5494858944cfe24394cf6"}
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.615978 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-hs48v" event={"ID":"16f9ca32-c0b3-4269-af05-a68a6d21269b","Type":"ContainerStarted","Data":"523bb2c619304adfe38f3e793184d46f7a420c1e26d074d1c65e4f2b98246a63"}
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.617622 4691 generic.go:334] "Generic (PLEG): container finished" podID="d8ec9052-6129-4592-a4b5-0eb55cb17ee6" containerID="9dad793bbc1016553d0da2ae1feff243634a5be08b543431b7b94e8f1b8b0eb9" exitCode=0
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.618455 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb" event={"ID":"d8ec9052-6129-4592-a4b5-0eb55cb17ee6","Type":"ContainerDied","Data":"9dad793bbc1016553d0da2ae1feff243634a5be08b543431b7b94e8f1b8b0eb9"}
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.618499 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb" event={"ID":"d8ec9052-6129-4592-a4b5-0eb55cb17ee6","Type":"ContainerStarted","Data":"de8686a0bbf9a1e9f842eda56bd338b83a6db02a7c5cda1eec91b471a7a3f189"}
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.618519 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.685236 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-hs48v" podStartSLOduration=1.685213992 podStartE2EDuration="1.685213992s" podCreationTimestamp="2025-11-24 08:12:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:12:14.67893766 +0000 UTC m=+896.677886909" watchObservedRunningTime="2025-11-24 08:12:14.685213992 +0000 UTC m=+896.684163241"
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.722483 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.847267 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Nov 24 08:12:14 crc kubenswrapper[4691]: I1124 08:12:14.847674 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.037150 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.039032 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.043738 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.063326 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.063528 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.063893 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-69cj8"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.063588 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.106502 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.180764 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.183090 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56vqf\" (UniqueName: \"kubernetes.io/projected/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-kube-api-access-56vqf\") pod \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") "
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.183180 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-ovsdbserver-sb\") pod \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") "
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.183229 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-config\") pod \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") "
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.183265 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-dns-svc\") pod \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\" (UID: \"d8ec9052-6129-4592-a4b5-0eb55cb17ee6\") "
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.183573 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4897f50d-627f-434b-a0d8-84854f219509-scripts\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.183615 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4897f50d-627f-434b-a0d8-84854f219509-config\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.183810 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4897f50d-627f-434b-a0d8-84854f219509-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.183960 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf4wt\" (UniqueName: \"kubernetes.io/projected/4897f50d-627f-434b-a0d8-84854f219509-kube-api-access-nf4wt\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.184052 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4897f50d-627f-434b-a0d8-84854f219509-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.184430 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4897f50d-627f-434b-a0d8-84854f219509-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.184525 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/4897f50d-627f-434b-a0d8-84854f219509-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.188657 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-kube-api-access-56vqf" (OuterVolumeSpecName: "kube-api-access-56vqf") pod "d8ec9052-6129-4592-a4b5-0eb55cb17ee6" (UID: "d8ec9052-6129-4592-a4b5-0eb55cb17ee6"). InnerVolumeSpecName "kube-api-access-56vqf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.206809 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d8ec9052-6129-4592-a4b5-0eb55cb17ee6" (UID: "d8ec9052-6129-4592-a4b5-0eb55cb17ee6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.216229 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d8ec9052-6129-4592-a4b5-0eb55cb17ee6" (UID: "d8ec9052-6129-4592-a4b5-0eb55cb17ee6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.217972 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-config" (OuterVolumeSpecName: "config") pod "d8ec9052-6129-4592-a4b5-0eb55cb17ee6" (UID: "d8ec9052-6129-4592-a4b5-0eb55cb17ee6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.286592 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4897f50d-627f-434b-a0d8-84854f219509-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.286659 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf4wt\" (UniqueName: \"kubernetes.io/projected/4897f50d-627f-434b-a0d8-84854f219509-kube-api-access-nf4wt\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.286703 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4897f50d-627f-434b-a0d8-84854f219509-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.286843 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4897f50d-627f-434b-a0d8-84854f219509-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.286882 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/4897f50d-627f-434b-a0d8-84854f219509-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.286988 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4897f50d-627f-434b-a0d8-84854f219509-scripts\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.287643 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4897f50d-627f-434b-a0d8-84854f219509-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.287858 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4897f50d-627f-434b-a0d8-84854f219509-config\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.287992 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4897f50d-627f-434b-a0d8-84854f219509-scripts\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.288954 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4897f50d-627f-434b-a0d8-84854f219509-config\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.290337 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56vqf\" (UniqueName: \"kubernetes.io/projected/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-kube-api-access-56vqf\") on node \"crc\" DevicePath \"\""
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.290372 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.290388 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-config\") on node \"crc\" DevicePath \"\""
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.290403 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8ec9052-6129-4592-a4b5-0eb55cb17ee6-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.292970 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4897f50d-627f-434b-a0d8-84854f219509-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.293148 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4897f50d-627f-434b-a0d8-84854f219509-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.294658 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/4897f50d-627f-434b-a0d8-84854f219509-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.308540 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf4wt\" (UniqueName: \"kubernetes.io/projected/4897f50d-627f-434b-a0d8-84854f219509-kube-api-access-nf4wt\") pod \"ovn-northd-0\" (UID: \"4897f50d-627f-434b-a0d8-84854f219509\") " pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.382865 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.631612 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb" event={"ID":"d8ec9052-6129-4592-a4b5-0eb55cb17ee6","Type":"ContainerDied","Data":"de8686a0bbf9a1e9f842eda56bd338b83a6db02a7c5cda1eec91b471a7a3f189"}
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.631676 4691 scope.go:117] "RemoveContainer" containerID="9dad793bbc1016553d0da2ae1feff243634a5be08b543431b7b94e8f1b8b0eb9"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.631644 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-kj7mb"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.640682 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-s58t5" event={"ID":"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9","Type":"ContainerStarted","Data":"049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4"}
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.640916 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-s58t5"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.673611 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-s58t5" podStartSLOduration=2.673571256 podStartE2EDuration="2.673571256s" podCreationTimestamp="2025-11-24 08:12:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:12:15.660014362 +0000 UTC m=+897.658963611" watchObservedRunningTime="2025-11-24 08:12:15.673571256 +0000 UTC m=+897.672520505"
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.700627 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-kj7mb"]
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.709726 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-kj7mb"]
Nov 24 08:12:15 crc kubenswrapper[4691]: I1124 08:12:15.858823 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 24 08:12:16 crc kubenswrapper[4691]: I1124 08:12:16.649918 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"4897f50d-627f-434b-a0d8-84854f219509","Type":"ContainerStarted","Data":"4fffc99294e3a90b490c829f7774f5d3bfd95f29e24c514600b5a857213728ca"}
Nov 24 08:12:16 crc kubenswrapper[4691]: I1124 08:12:16.773947 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8ec9052-6129-4592-a4b5-0eb55cb17ee6" path="/var/lib/kubelet/pods/d8ec9052-6129-4592-a4b5-0eb55cb17ee6/volumes"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.036394 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.150534 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-s58t5"]
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.204523 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-dq5hb"]
Nov 24 08:12:17 crc kubenswrapper[4691]: E1124 08:12:17.205433 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8ec9052-6129-4592-a4b5-0eb55cb17ee6" containerName="init"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.208497 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8ec9052-6129-4592-a4b5-0eb55cb17ee6" containerName="init"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.208749 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8ec9052-6129-4592-a4b5-0eb55cb17ee6" containerName="init"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.209876 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.253778 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-dq5hb"]
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.327422 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lksst\" (UniqueName: \"kubernetes.io/projected/fac2b9b8-8c64-4322-9251-b22dc0e758ed-kube-api-access-lksst\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.327522 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.327568 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.327606 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.327631 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-config\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.428746 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.429044 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-config\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.429194 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lksst\" (UniqueName: \"kubernetes.io/projected/fac2b9b8-8c64-4322-9251-b22dc0e758ed-kube-api-access-lksst\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.429285 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.429367 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.429936 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.430213 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-config\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.430374 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.430764 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.451584 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lksst\" (UniqueName: \"kubernetes.io/projected/fac2b9b8-8c64-4322-9251-b22dc0e758ed-kube-api-access-lksst\") pod \"dnsmasq-dns-b8fbc5445-dq5hb\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.456377 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.539256 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.600024 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Nov 24 08:12:17 crc kubenswrapper[4691]: I1124 08:12:17.674575 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"4897f50d-627f-434b-a0d8-84854f219509","Type":"ContainerStarted","Data":"d18fdee0bc0f41eb8b46eb71b5db96fe8ff38d4dbb1a9ec7928ae05f4ec53af7"}
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.054466 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-dq5hb"]
Nov 24 08:12:18 crc kubenswrapper[4691]: W1124 08:12:18.061732 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfac2b9b8_8c64_4322_9251_b22dc0e758ed.slice/crio-09062fa9b6b5733542dda4752973e9c1b8757ffc0d9261af3c2c111f07d132c5 WatchSource:0}: Error finding container 09062fa9b6b5733542dda4752973e9c1b8757ffc0d9261af3c2c111f07d132c5: Status 404 returned error can't find the container with id 09062fa9b6b5733542dda4752973e9c1b8757ffc0d9261af3c2c111f07d132c5
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.345579 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"]
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.351138 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.353684 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.353932 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.354085 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-69bqt"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.354258 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.375807 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.452376 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.452813 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.453085 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/94ab9159-218c-42b9-9c38-8e0701f3eeef-cache\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.453263 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6c87c\" (UniqueName: \"kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-kube-api-access-6c87c\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.453392 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/94ab9159-218c-42b9-9c38-8e0701f3eeef-lock\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.555334 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.555491 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/94ab9159-218c-42b9-9c38-8e0701f3eeef-cache\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: E1124 08:12:18.555529 4691 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 24 08:12:18 crc kubenswrapper[4691]: E1124 08:12:18.555550 4691 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 24 08:12:18 crc kubenswrapper[4691]: E1124 08:12:18.555606 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift podName:94ab9159-218c-42b9-9c38-8e0701f3eeef nodeName:}" failed. No retries permitted until 2025-11-24 08:12:19.055584909 +0000 UTC m=+901.054534158 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift") pod "swift-storage-0" (UID: "94ab9159-218c-42b9-9c38-8e0701f3eeef") : configmap "swift-ring-files" not found
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.556087 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/94ab9159-218c-42b9-9c38-8e0701f3eeef-cache\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.556164 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6c87c\" (UniqueName: \"kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-kube-api-access-6c87c\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.556404 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/94ab9159-218c-42b9-9c38-8e0701f3eeef-lock\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.556677 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/94ab9159-218c-42b9-9c38-8e0701f3eeef-lock\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.556767 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.557154 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.575471 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6c87c\" (UniqueName: \"kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-kube-api-access-6c87c\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.587811 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.595465 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-whbpx"]
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.596661 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-whbpx"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.598422 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.598851 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.598894 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.617068 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-whbpx"]
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.632860 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-tzjh7"]
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.634027 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-tzjh7"
Nov 24 08:12:18 crc kubenswrapper[4691]: E1124 08:12:18.646430 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-mdbnl ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/swift-ring-rebalance-whbpx" podUID="2bc65164-a535-4157-af1a-5c6c54e54198"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.651145 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-tzjh7"]
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664085 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-ring-data-devices\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664132 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdggw\" (UniqueName: \"kubernetes.io/projected/5c00da32-542e-45b4-837c-67fa08ff49d3-kube-api-access-mdggw\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664158 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-dispersionconf\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664176 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-ring-data-devices\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx"
Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664194 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5c00da32-542e-45b4-837c-67fa08ff49d3-etc-swift\") pod 
\"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664280 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdbnl\" (UniqueName: \"kubernetes.io/projected/2bc65164-a535-4157-af1a-5c6c54e54198-kube-api-access-mdbnl\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664332 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-swiftconf\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664417 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-swiftconf\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664482 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-scripts\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664515 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2bc65164-a535-4157-af1a-5c6c54e54198-etc-swift\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664572 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-scripts\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664669 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-combined-ca-bundle\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664713 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-combined-ca-bundle\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.664734 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: 
\"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-dispersionconf\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.682714 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-whbpx"] Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.685672 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"4897f50d-627f-434b-a0d8-84854f219509","Type":"ContainerStarted","Data":"e3f54ebec4a563412f8f4f0cb3acefedcb357d91da73c54fd76e85319da946bb"} Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.686026 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.687405 4691 generic.go:334] "Generic (PLEG): container finished" podID="fac2b9b8-8c64-4322-9251-b22dc0e758ed" containerID="befa03cfd844199169b25bd1c69e1cabf04527abfb4870f653e219f8d39f0ff3" exitCode=0 Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.687515 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.688074 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb" event={"ID":"fac2b9b8-8c64-4322-9251-b22dc0e758ed","Type":"ContainerDied","Data":"befa03cfd844199169b25bd1c69e1cabf04527abfb4870f653e219f8d39f0ff3"} Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.688102 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb" event={"ID":"fac2b9b8-8c64-4322-9251-b22dc0e758ed","Type":"ContainerStarted","Data":"09062fa9b6b5733542dda4752973e9c1b8757ffc0d9261af3c2c111f07d132c5"} Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.688427 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-s58t5" podUID="fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" containerName="dnsmasq-dns" containerID="cri-o://049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4" gracePeriod=10 Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.700515 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.713382 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.650070159 podStartE2EDuration="4.713365579s" podCreationTimestamp="2025-11-24 08:12:14 +0000 UTC" firstStartedPulling="2025-11-24 08:12:15.871610983 +0000 UTC m=+897.870560232" lastFinishedPulling="2025-11-24 08:12:16.934906403 +0000 UTC m=+898.933855652" observedRunningTime="2025-11-24 08:12:18.706630163 +0000 UTC m=+900.705579422" watchObservedRunningTime="2025-11-24 08:12:18.713365579 +0000 UTC m=+900.712314828" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.765937 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-ring-data-devices\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.765994 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdggw\" (UniqueName: \"kubernetes.io/projected/5c00da32-542e-45b4-837c-67fa08ff49d3-kube-api-access-mdggw\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766021 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-ring-data-devices\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766042 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-dispersionconf\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766066 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5c00da32-542e-45b4-837c-67fa08ff49d3-etc-swift\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766140 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdbnl\" (UniqueName: \"kubernetes.io/projected/2bc65164-a535-4157-af1a-5c6c54e54198-kube-api-access-mdbnl\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766186 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-swiftconf\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766243 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: 
\"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-swiftconf\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766278 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-scripts\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766303 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2bc65164-a535-4157-af1a-5c6c54e54198-etc-swift\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766374 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-scripts\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766409 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-combined-ca-bundle\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766435 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-combined-ca-bundle\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.766477 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-dispersionconf\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.768156 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2bc65164-a535-4157-af1a-5c6c54e54198-etc-swift\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.768599 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5c00da32-542e-45b4-837c-67fa08ff49d3-etc-swift\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.770601 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.771609 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-combined-ca-bundle\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.771743 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.772670 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.774819 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-swiftconf\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.775862 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-swiftconf\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.776697 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-combined-ca-bundle\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.779333 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-ring-data-devices\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.781173 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-ring-data-devices\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.782221 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-scripts\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.783269 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-scripts\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.784169 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-dispersionconf\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc 
kubenswrapper[4691]: I1124 08:12:18.785286 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-dispersionconf\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.789162 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdggw\" (UniqueName: \"kubernetes.io/projected/5c00da32-542e-45b4-837c-67fa08ff49d3-kube-api-access-mdggw\") pod \"swift-ring-rebalance-tzjh7\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.791822 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdbnl\" (UniqueName: \"kubernetes.io/projected/2bc65164-a535-4157-af1a-5c6c54e54198-kube-api-access-mdbnl\") pod \"swift-ring-rebalance-whbpx\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.867134 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-swiftconf\") pod \"2bc65164-a535-4157-af1a-5c6c54e54198\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.867298 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-combined-ca-bundle\") pod \"2bc65164-a535-4157-af1a-5c6c54e54198\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.867630 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2bc65164-a535-4157-af1a-5c6c54e54198-etc-swift\") pod \"2bc65164-a535-4157-af1a-5c6c54e54198\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.867749 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-ring-data-devices\") pod \"2bc65164-a535-4157-af1a-5c6c54e54198\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.867896 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-dispersionconf\") pod \"2bc65164-a535-4157-af1a-5c6c54e54198\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.867987 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-scripts\") pod \"2bc65164-a535-4157-af1a-5c6c54e54198\" (UID: \"2bc65164-a535-4157-af1a-5c6c54e54198\") " Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.868133 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdbnl\" (UniqueName: \"kubernetes.io/projected/2bc65164-a535-4157-af1a-5c6c54e54198-kube-api-access-mdbnl\") pod \"2bc65164-a535-4157-af1a-5c6c54e54198\" (UID: 
\"2bc65164-a535-4157-af1a-5c6c54e54198\") " Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.868011 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bc65164-a535-4157-af1a-5c6c54e54198-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "2bc65164-a535-4157-af1a-5c6c54e54198" (UID: "2bc65164-a535-4157-af1a-5c6c54e54198"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.868494 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "2bc65164-a535-4157-af1a-5c6c54e54198" (UID: "2bc65164-a535-4157-af1a-5c6c54e54198"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.868625 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-scripts" (OuterVolumeSpecName: "scripts") pod "2bc65164-a535-4157-af1a-5c6c54e54198" (UID: "2bc65164-a535-4157-af1a-5c6c54e54198"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.871753 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "2bc65164-a535-4157-af1a-5c6c54e54198" (UID: "2bc65164-a535-4157-af1a-5c6c54e54198"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.871772 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2bc65164-a535-4157-af1a-5c6c54e54198" (UID: "2bc65164-a535-4157-af1a-5c6c54e54198"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.872172 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "2bc65164-a535-4157-af1a-5c6c54e54198" (UID: "2bc65164-a535-4157-af1a-5c6c54e54198"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.872261 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bc65164-a535-4157-af1a-5c6c54e54198-kube-api-access-mdbnl" (OuterVolumeSpecName: "kube-api-access-mdbnl") pod "2bc65164-a535-4157-af1a-5c6c54e54198" (UID: "2bc65164-a535-4157-af1a-5c6c54e54198"). InnerVolumeSpecName "kube-api-access-mdbnl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.971191 4691 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.971864 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.971878 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdbnl\" (UniqueName: \"kubernetes.io/projected/2bc65164-a535-4157-af1a-5c6c54e54198-kube-api-access-mdbnl\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.971894 4691 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.971904 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bc65164-a535-4157-af1a-5c6c54e54198-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.971914 4691 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2bc65164-a535-4157-af1a-5c6c54e54198-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.971924 4691 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2bc65164-a535-4157-af1a-5c6c54e54198-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.974170 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-69bqt" Nov 24 08:12:18 crc kubenswrapper[4691]: I1124 08:12:18.978220 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.077469 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0" Nov 24 08:12:19 crc kubenswrapper[4691]: E1124 08:12:19.077712 4691 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 08:12:19 crc kubenswrapper[4691]: E1124 08:12:19.077749 4691 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 08:12:19 crc kubenswrapper[4691]: E1124 08:12:19.077831 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift podName:94ab9159-218c-42b9-9c38-8e0701f3eeef nodeName:}" failed. No retries permitted until 2025-11-24 08:12:20.077804285 +0000 UTC m=+902.076753534 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift") pod "swift-storage-0" (UID: "94ab9159-218c-42b9-9c38-8e0701f3eeef") : configmap "swift-ring-files" not found Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.137619 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-s58t5" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.178058 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nv5tf\" (UniqueName: \"kubernetes.io/projected/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-kube-api-access-nv5tf\") pod \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.178425 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-sb\") pod \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.178613 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-dns-svc\") pod \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.178639 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-nb\") pod \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.178838 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-config\") pod \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\" (UID: \"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9\") " Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.201858 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-kube-api-access-nv5tf" (OuterVolumeSpecName: "kube-api-access-nv5tf") pod "fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" (UID: "fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9"). InnerVolumeSpecName "kube-api-access-nv5tf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.241868 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" (UID: "fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.265560 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" (UID: "fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.267437 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" (UID: "fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.270983 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-config" (OuterVolumeSpecName: "config") pod "fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" (UID: "fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.278320 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-tzjh7"] Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.281283 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.281343 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.281361 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.281372 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nv5tf\" (UniqueName: \"kubernetes.io/projected/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-kube-api-access-nv5tf\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.281383 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.681518 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.700256 4691 generic.go:334] "Generic (PLEG): container finished" podID="fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" containerID="049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4" exitCode=0 Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.700635 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-s58t5" event={"ID":"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9","Type":"ContainerDied","Data":"049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4"} Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.700692 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-s58t5" event={"ID":"fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9","Type":"ContainerDied","Data":"f5a826496bb18682c6c3959835285515529b80f7c9b7e8a75ce3124e89460eef"} Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.700708 4691 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-s58t5" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.700713 4691 scope.go:117] "RemoveContainer" containerID="049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.708939 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-tzjh7" event={"ID":"5c00da32-542e-45b4-837c-67fa08ff49d3","Type":"ContainerStarted","Data":"2d4b6734c5c581e64bca9c352d4003a1557fae7e56042af37b64277e2a37fce1"} Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.711597 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb" event={"ID":"fac2b9b8-8c64-4322-9251-b22dc0e758ed","Type":"ContainerStarted","Data":"e6e2281672da1062336f30149b437c6019bb20363bbbb5195f4c30506d57d6f6"} Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.711655 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-whbpx" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.711686 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.731264 4691 scope.go:117] "RemoveContainer" containerID="d9012fefe11d0496f43c751a566d038aeec2dec5c960a140f2c518dbb028f5fb" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.735140 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb" podStartSLOduration=2.735123702 podStartE2EDuration="2.735123702s" podCreationTimestamp="2025-11-24 08:12:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:12:19.731463076 +0000 UTC m=+901.730412325" watchObservedRunningTime="2025-11-24 08:12:19.735123702 +0000 UTC m=+901.734072951" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.761135 4691 scope.go:117] "RemoveContainer" containerID="049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.765224 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-whbpx"] Nov 24 08:12:19 crc kubenswrapper[4691]: E1124 08:12:19.769111 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4\": container with ID starting with 049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4 not found: ID does not exist" containerID="049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.769164 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4"} err="failed to get container status \"049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4\": rpc error: code = NotFound desc = could not find container \"049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4\": container with ID starting with 049ba051cea9ea3f0f6d81959a5a848163f2a603c8ec26e416f47d5b69926cc4 not found: ID does not exist" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.769192 4691 scope.go:117] "RemoveContainer" 
containerID="d9012fefe11d0496f43c751a566d038aeec2dec5c960a140f2c518dbb028f5fb" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.769732 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-whbpx"] Nov 24 08:12:19 crc kubenswrapper[4691]: E1124 08:12:19.769866 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9012fefe11d0496f43c751a566d038aeec2dec5c960a140f2c518dbb028f5fb\": container with ID starting with d9012fefe11d0496f43c751a566d038aeec2dec5c960a140f2c518dbb028f5fb not found: ID does not exist" containerID="d9012fefe11d0496f43c751a566d038aeec2dec5c960a140f2c518dbb028f5fb" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.769903 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9012fefe11d0496f43c751a566d038aeec2dec5c960a140f2c518dbb028f5fb"} err="failed to get container status \"d9012fefe11d0496f43c751a566d038aeec2dec5c960a140f2c518dbb028f5fb\": rpc error: code = NotFound desc = could not find container \"d9012fefe11d0496f43c751a566d038aeec2dec5c960a140f2c518dbb028f5fb\": container with ID starting with d9012fefe11d0496f43c751a566d038aeec2dec5c960a140f2c518dbb028f5fb not found: ID does not exist" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.782413 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-s58t5"] Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.784391 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 24 08:12:19 crc kubenswrapper[4691]: I1124 08:12:19.788152 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-s58t5"] Nov 24 08:12:20 crc kubenswrapper[4691]: I1124 08:12:20.097409 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0" Nov 24 08:12:20 crc kubenswrapper[4691]: E1124 08:12:20.097617 4691 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 08:12:20 crc kubenswrapper[4691]: E1124 08:12:20.097643 4691 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 08:12:20 crc kubenswrapper[4691]: E1124 08:12:20.097708 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift podName:94ab9159-218c-42b9-9c38-8e0701f3eeef nodeName:}" failed. No retries permitted until 2025-11-24 08:12:22.097686734 +0000 UTC m=+904.096635983 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift") pod "swift-storage-0" (UID: "94ab9159-218c-42b9-9c38-8e0701f3eeef") : configmap "swift-ring-files" not found Nov 24 08:12:20 crc kubenswrapper[4691]: I1124 08:12:20.775167 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bc65164-a535-4157-af1a-5c6c54e54198" path="/var/lib/kubelet/pods/2bc65164-a535-4157-af1a-5c6c54e54198/volumes" Nov 24 08:12:20 crc kubenswrapper[4691]: I1124 08:12:20.775857 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" path="/var/lib/kubelet/pods/fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9/volumes" Nov 24 08:12:22 crc kubenswrapper[4691]: I1124 08:12:22.141219 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0" Nov 24 08:12:22 crc kubenswrapper[4691]: E1124 08:12:22.141590 4691 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 08:12:22 crc kubenswrapper[4691]: E1124 08:12:22.141616 4691 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 08:12:22 crc kubenswrapper[4691]: E1124 08:12:22.141660 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift podName:94ab9159-218c-42b9-9c38-8e0701f3eeef nodeName:}" failed. No retries permitted until 2025-11-24 08:12:26.141645706 +0000 UTC m=+908.140594955 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift") pod "swift-storage-0" (UID: "94ab9159-218c-42b9-9c38-8e0701f3eeef") : configmap "swift-ring-files" not found Nov 24 08:12:22 crc kubenswrapper[4691]: I1124 08:12:22.737980 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-tzjh7" event={"ID":"5c00da32-542e-45b4-837c-67fa08ff49d3","Type":"ContainerStarted","Data":"a3bb6d716454d0a724012f299c808063ff75302b009b5828762dc546340d1e43"} Nov 24 08:12:22 crc kubenswrapper[4691]: I1124 08:12:22.773067 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-tzjh7" podStartSLOduration=1.759309282 podStartE2EDuration="4.773042309s" podCreationTimestamp="2025-11-24 08:12:18 +0000 UTC" firstStartedPulling="2025-11-24 08:12:19.296919804 +0000 UTC m=+901.295869053" lastFinishedPulling="2025-11-24 08:12:22.310652831 +0000 UTC m=+904.309602080" observedRunningTime="2025-11-24 08:12:22.764138981 +0000 UTC m=+904.763088230" watchObservedRunningTime="2025-11-24 08:12:22.773042309 +0000 UTC m=+904.771991558" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.804700 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-0a0f-account-create-trpcp"] Nov 24 08:12:24 crc kubenswrapper[4691]: E1124 08:12:24.807397 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" containerName="dnsmasq-dns" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.807580 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" containerName="dnsmasq-dns" Nov 24 08:12:24 crc kubenswrapper[4691]: E1124 08:12:24.807679 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" containerName="init" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.807749 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" containerName="init" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.808031 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd2c2eb8-8ce7-4973-98c2-b9d7a86969a9" containerName="dnsmasq-dns" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.809179 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0a0f-account-create-trpcp" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.813187 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.824948 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0a0f-account-create-trpcp"] Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.867205 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-5559c"] Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.868494 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-5559c" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.876155 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-5559c"] Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.892193 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kwr8\" (UniqueName: \"kubernetes.io/projected/0532c6d6-54c8-4920-856d-049cbc33863f-kube-api-access-8kwr8\") pod \"keystone-0a0f-account-create-trpcp\" (UID: \"0532c6d6-54c8-4920-856d-049cbc33863f\") " pod="openstack/keystone-0a0f-account-create-trpcp" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.892945 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0532c6d6-54c8-4920-856d-049cbc33863f-operator-scripts\") pod \"keystone-0a0f-account-create-trpcp\" (UID: \"0532c6d6-54c8-4920-856d-049cbc33863f\") " pod="openstack/keystone-0a0f-account-create-trpcp" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.994961 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snxtm\" (UniqueName: \"kubernetes.io/projected/070a2b47-984b-4039-b05b-1953eec94bad-kube-api-access-snxtm\") pod \"keystone-db-create-5559c\" (UID: \"070a2b47-984b-4039-b05b-1953eec94bad\") " pod="openstack/keystone-db-create-5559c" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.995052 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kwr8\" (UniqueName: \"kubernetes.io/projected/0532c6d6-54c8-4920-856d-049cbc33863f-kube-api-access-8kwr8\") pod \"keystone-0a0f-account-create-trpcp\" (UID: \"0532c6d6-54c8-4920-856d-049cbc33863f\") " pod="openstack/keystone-0a0f-account-create-trpcp" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.995117 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/070a2b47-984b-4039-b05b-1953eec94bad-operator-scripts\") pod \"keystone-db-create-5559c\" (UID: \"070a2b47-984b-4039-b05b-1953eec94bad\") " pod="openstack/keystone-db-create-5559c" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.995148 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0532c6d6-54c8-4920-856d-049cbc33863f-operator-scripts\") pod \"keystone-0a0f-account-create-trpcp\" (UID: \"0532c6d6-54c8-4920-856d-049cbc33863f\") " pod="openstack/keystone-0a0f-account-create-trpcp" Nov 24 08:12:24 crc kubenswrapper[4691]: I1124 08:12:24.995924 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0532c6d6-54c8-4920-856d-049cbc33863f-operator-scripts\") pod \"keystone-0a0f-account-create-trpcp\" (UID: \"0532c6d6-54c8-4920-856d-049cbc33863f\") " pod="openstack/keystone-0a0f-account-create-trpcp" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.016347 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kwr8\" (UniqueName: \"kubernetes.io/projected/0532c6d6-54c8-4920-856d-049cbc33863f-kube-api-access-8kwr8\") pod \"keystone-0a0f-account-create-trpcp\" (UID: \"0532c6d6-54c8-4920-856d-049cbc33863f\") " pod="openstack/keystone-0a0f-account-create-trpcp" Nov 24 08:12:25 crc 
kubenswrapper[4691]: I1124 08:12:25.097241 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snxtm\" (UniqueName: \"kubernetes.io/projected/070a2b47-984b-4039-b05b-1953eec94bad-kube-api-access-snxtm\") pod \"keystone-db-create-5559c\" (UID: \"070a2b47-984b-4039-b05b-1953eec94bad\") " pod="openstack/keystone-db-create-5559c" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.098487 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/070a2b47-984b-4039-b05b-1953eec94bad-operator-scripts\") pod \"keystone-db-create-5559c\" (UID: \"070a2b47-984b-4039-b05b-1953eec94bad\") " pod="openstack/keystone-db-create-5559c" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.099736 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/070a2b47-984b-4039-b05b-1953eec94bad-operator-scripts\") pod \"keystone-db-create-5559c\" (UID: \"070a2b47-984b-4039-b05b-1953eec94bad\") " pod="openstack/keystone-db-create-5559c" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.119353 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snxtm\" (UniqueName: \"kubernetes.io/projected/070a2b47-984b-4039-b05b-1953eec94bad-kube-api-access-snxtm\") pod \"keystone-db-create-5559c\" (UID: \"070a2b47-984b-4039-b05b-1953eec94bad\") " pod="openstack/keystone-db-create-5559c" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.126470 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-q7ttd"] Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.127773 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-q7ttd" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.128747 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0a0f-account-create-trpcp" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.159652 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-q7ttd"] Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.192521 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-5559c" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.200005 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-operator-scripts\") pod \"placement-db-create-q7ttd\" (UID: \"c0b5edb1-0fd7-4165-9644-30ec96e3bf88\") " pod="openstack/placement-db-create-q7ttd" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.200059 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f59vx\" (UniqueName: \"kubernetes.io/projected/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-kube-api-access-f59vx\") pod \"placement-db-create-q7ttd\" (UID: \"c0b5edb1-0fd7-4165-9644-30ec96e3bf88\") " pod="openstack/placement-db-create-q7ttd" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.250042 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7539-account-create-wnbqm"] Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.251250 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7539-account-create-wnbqm" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.258772 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.303326 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-operator-scripts\") pod \"placement-db-create-q7ttd\" (UID: \"c0b5edb1-0fd7-4165-9644-30ec96e3bf88\") " pod="openstack/placement-db-create-q7ttd" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.303382 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f59vx\" (UniqueName: \"kubernetes.io/projected/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-kube-api-access-f59vx\") pod \"placement-db-create-q7ttd\" (UID: \"c0b5edb1-0fd7-4165-9644-30ec96e3bf88\") " pod="openstack/placement-db-create-q7ttd" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.303437 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8982bed2-9351-43d6-964d-85d5aa0003a7-operator-scripts\") pod \"placement-7539-account-create-wnbqm\" (UID: \"8982bed2-9351-43d6-964d-85d5aa0003a7\") " pod="openstack/placement-7539-account-create-wnbqm" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.303543 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjwpr\" (UniqueName: \"kubernetes.io/projected/8982bed2-9351-43d6-964d-85d5aa0003a7-kube-api-access-vjwpr\") pod \"placement-7539-account-create-wnbqm\" (UID: \"8982bed2-9351-43d6-964d-85d5aa0003a7\") " pod="openstack/placement-7539-account-create-wnbqm" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.304489 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-operator-scripts\") pod \"placement-db-create-q7ttd\" (UID: \"c0b5edb1-0fd7-4165-9644-30ec96e3bf88\") " pod="openstack/placement-db-create-q7ttd" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.306186 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7539-account-create-wnbqm"] Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.345309 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f59vx\" (UniqueName: \"kubernetes.io/projected/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-kube-api-access-f59vx\") pod \"placement-db-create-q7ttd\" (UID: \"c0b5edb1-0fd7-4165-9644-30ec96e3bf88\") " pod="openstack/placement-db-create-q7ttd" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.420670 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8982bed2-9351-43d6-964d-85d5aa0003a7-operator-scripts\") pod \"placement-7539-account-create-wnbqm\" (UID: \"8982bed2-9351-43d6-964d-85d5aa0003a7\") " pod="openstack/placement-7539-account-create-wnbqm" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.420770 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjwpr\" (UniqueName: \"kubernetes.io/projected/8982bed2-9351-43d6-964d-85d5aa0003a7-kube-api-access-vjwpr\") pod \"placement-7539-account-create-wnbqm\" (UID: 
\"8982bed2-9351-43d6-964d-85d5aa0003a7\") " pod="openstack/placement-7539-account-create-wnbqm" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.422158 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8982bed2-9351-43d6-964d-85d5aa0003a7-operator-scripts\") pod \"placement-7539-account-create-wnbqm\" (UID: \"8982bed2-9351-43d6-964d-85d5aa0003a7\") " pod="openstack/placement-7539-account-create-wnbqm" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.454623 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjwpr\" (UniqueName: \"kubernetes.io/projected/8982bed2-9351-43d6-964d-85d5aa0003a7-kube-api-access-vjwpr\") pod \"placement-7539-account-create-wnbqm\" (UID: \"8982bed2-9351-43d6-964d-85d5aa0003a7\") " pod="openstack/placement-7539-account-create-wnbqm" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.466270 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-q7ttd" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.500802 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-b277-account-create-bksjl"] Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.501418 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7539-account-create-wnbqm" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.502410 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-b277-account-create-bksjl" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.505541 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-mjqw5"] Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.506792 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.507455 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-mjqw5" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.512346 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-mjqw5"] Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.526770 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-b277-account-create-bksjl"] Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.730175 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74a59460-c08c-46eb-97c1-07609f197dee-operator-scripts\") pod \"glance-b277-account-create-bksjl\" (UID: \"74a59460-c08c-46eb-97c1-07609f197dee\") " pod="openstack/glance-b277-account-create-bksjl" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.730221 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnlz5\" (UniqueName: \"kubernetes.io/projected/74a59460-c08c-46eb-97c1-07609f197dee-kube-api-access-pnlz5\") pod \"glance-b277-account-create-bksjl\" (UID: \"74a59460-c08c-46eb-97c1-07609f197dee\") " pod="openstack/glance-b277-account-create-bksjl" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.730259 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hf4zx\" (UniqueName: \"kubernetes.io/projected/6cae1b4e-964b-4f74-8ace-ec10292243fb-kube-api-access-hf4zx\") pod \"glance-db-create-mjqw5\" (UID: \"6cae1b4e-964b-4f74-8ace-ec10292243fb\") " pod="openstack/glance-db-create-mjqw5" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.730371 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cae1b4e-964b-4f74-8ace-ec10292243fb-operator-scripts\") pod \"glance-db-create-mjqw5\" (UID: \"6cae1b4e-964b-4f74-8ace-ec10292243fb\") " pod="openstack/glance-db-create-mjqw5" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.836471 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cae1b4e-964b-4f74-8ace-ec10292243fb-operator-scripts\") pod \"glance-db-create-mjqw5\" (UID: \"6cae1b4e-964b-4f74-8ace-ec10292243fb\") " pod="openstack/glance-db-create-mjqw5" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.835363 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cae1b4e-964b-4f74-8ace-ec10292243fb-operator-scripts\") pod \"glance-db-create-mjqw5\" (UID: \"6cae1b4e-964b-4f74-8ace-ec10292243fb\") " pod="openstack/glance-db-create-mjqw5" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.836680 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74a59460-c08c-46eb-97c1-07609f197dee-operator-scripts\") pod \"glance-b277-account-create-bksjl\" (UID: \"74a59460-c08c-46eb-97c1-07609f197dee\") " pod="openstack/glance-b277-account-create-bksjl" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.837206 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnlz5\" (UniqueName: \"kubernetes.io/projected/74a59460-c08c-46eb-97c1-07609f197dee-kube-api-access-pnlz5\") pod \"glance-b277-account-create-bksjl\" (UID: 
\"74a59460-c08c-46eb-97c1-07609f197dee\") " pod="openstack/glance-b277-account-create-bksjl" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.837289 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hf4zx\" (UniqueName: \"kubernetes.io/projected/6cae1b4e-964b-4f74-8ace-ec10292243fb-kube-api-access-hf4zx\") pod \"glance-db-create-mjqw5\" (UID: \"6cae1b4e-964b-4f74-8ace-ec10292243fb\") " pod="openstack/glance-db-create-mjqw5" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.837922 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74a59460-c08c-46eb-97c1-07609f197dee-operator-scripts\") pod \"glance-b277-account-create-bksjl\" (UID: \"74a59460-c08c-46eb-97c1-07609f197dee\") " pod="openstack/glance-b277-account-create-bksjl" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.859666 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnlz5\" (UniqueName: \"kubernetes.io/projected/74a59460-c08c-46eb-97c1-07609f197dee-kube-api-access-pnlz5\") pod \"glance-b277-account-create-bksjl\" (UID: \"74a59460-c08c-46eb-97c1-07609f197dee\") " pod="openstack/glance-b277-account-create-bksjl" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.860487 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hf4zx\" (UniqueName: \"kubernetes.io/projected/6cae1b4e-964b-4f74-8ace-ec10292243fb-kube-api-access-hf4zx\") pod \"glance-db-create-mjqw5\" (UID: \"6cae1b4e-964b-4f74-8ace-ec10292243fb\") " pod="openstack/glance-db-create-mjqw5" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.871759 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-mjqw5" Nov 24 08:12:25 crc kubenswrapper[4691]: I1124 08:12:25.925620 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-0a0f-account-create-trpcp"] Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.057688 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-5559c"] Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.128312 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-b277-account-create-bksjl" Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.147054 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0" Nov 24 08:12:26 crc kubenswrapper[4691]: E1124 08:12:26.147390 4691 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 24 08:12:26 crc kubenswrapper[4691]: E1124 08:12:26.147434 4691 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 24 08:12:26 crc kubenswrapper[4691]: E1124 08:12:26.147570 4691 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift podName:94ab9159-218c-42b9-9c38-8e0701f3eeef nodeName:}" failed. No retries permitted until 2025-11-24 08:12:34.147543657 +0000 UTC m=+916.146492906 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift") pod "swift-storage-0" (UID: "94ab9159-218c-42b9-9c38-8e0701f3eeef") : configmap "swift-ring-files" not found Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.157629 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7539-account-create-wnbqm"] Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.252614 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-q7ttd"] Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.403790 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-mjqw5"] Nov 24 08:12:26 crc kubenswrapper[4691]: W1124 08:12:26.416644 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cae1b4e_964b_4f74_8ace_ec10292243fb.slice/crio-a128cfa6553d6e9a18e35cb94297980144a9816db69efe1c367324aa8b60feac WatchSource:0}: Error finding container a128cfa6553d6e9a18e35cb94297980144a9816db69efe1c367324aa8b60feac: Status 404 returned error can't find the container with id a128cfa6553d6e9a18e35cb94297980144a9816db69efe1c367324aa8b60feac Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.652935 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-b277-account-create-bksjl"] Nov 24 08:12:26 crc kubenswrapper[4691]: W1124 08:12:26.657909 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74a59460_c08c_46eb_97c1_07609f197dee.slice/crio-749a0fc940f937fb558c78b59411548c6d21be05f50f396dccfea1af365bba1c WatchSource:0}: Error finding container 749a0fc940f937fb558c78b59411548c6d21be05f50f396dccfea1af365bba1c: Status 404 returned error can't find the container with id 749a0fc940f937fb558c78b59411548c6d21be05f50f396dccfea1af365bba1c Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.784342 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-mjqw5" event={"ID":"6cae1b4e-964b-4f74-8ace-ec10292243fb","Type":"ContainerStarted","Data":"22da12dae1417be7880c317676aa0973d20bdd55a2ab69418c94a8cdc852fe92"} Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.784393 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-mjqw5" event={"ID":"6cae1b4e-964b-4f74-8ace-ec10292243fb","Type":"ContainerStarted","Data":"a128cfa6553d6e9a18e35cb94297980144a9816db69efe1c367324aa8b60feac"} Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.790001 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0a0f-account-create-trpcp" event={"ID":"0532c6d6-54c8-4920-856d-049cbc33863f","Type":"ContainerStarted","Data":"5b6af859292ef981db8c91f78740d2480e285acb102629cbf0c98d1978f57809"} Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.790037 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0a0f-account-create-trpcp" event={"ID":"0532c6d6-54c8-4920-856d-049cbc33863f","Type":"ContainerStarted","Data":"b6979e989f277330405d587d6a32e5d83ec12522e02dd01f56c17525bc7da6cb"} Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.799033 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-mjqw5" podStartSLOduration=1.799012963 podStartE2EDuration="1.799012963s" podCreationTimestamp="2025-11-24 08:12:25 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:12:26.79891462 +0000 UTC m=+908.797863869" watchObservedRunningTime="2025-11-24 08:12:26.799012963 +0000 UTC m=+908.797962212" Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.800065 4691 generic.go:334] "Generic (PLEG): container finished" podID="8982bed2-9351-43d6-964d-85d5aa0003a7" containerID="7010ab2affce9184727a1c99de5866f8009be86e52e9154027f161543b10f4c6" exitCode=0 Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.800263 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7539-account-create-wnbqm" event={"ID":"8982bed2-9351-43d6-964d-85d5aa0003a7","Type":"ContainerDied","Data":"7010ab2affce9184727a1c99de5866f8009be86e52e9154027f161543b10f4c6"} Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.800326 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7539-account-create-wnbqm" event={"ID":"8982bed2-9351-43d6-964d-85d5aa0003a7","Type":"ContainerStarted","Data":"8897441dd08c68ddae19489bde69762e50cd4be71bddd3fd476f570575a1a497"} Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.805180 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-q7ttd" event={"ID":"c0b5edb1-0fd7-4165-9644-30ec96e3bf88","Type":"ContainerStarted","Data":"ef6a16148c5d039194bbc38f645fc84819158a17e73be2ea9035029ff79c2bbd"} Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.805225 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-q7ttd" event={"ID":"c0b5edb1-0fd7-4165-9644-30ec96e3bf88","Type":"ContainerStarted","Data":"4310e66e62fa9d4fadb4e4846c30dd7e8509b2ef6d6443e4ef8288708e831ee1"} Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.807389 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-b277-account-create-bksjl" event={"ID":"74a59460-c08c-46eb-97c1-07609f197dee","Type":"ContainerStarted","Data":"749a0fc940f937fb558c78b59411548c6d21be05f50f396dccfea1af365bba1c"} Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.819285 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-5559c" event={"ID":"070a2b47-984b-4039-b05b-1953eec94bad","Type":"ContainerStarted","Data":"30777a7f80a51c4a40862fa2242b4fe079d0b51f4ef1456fe6e0a758957b7044"} Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.819353 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-5559c" event={"ID":"070a2b47-984b-4039-b05b-1953eec94bad","Type":"ContainerStarted","Data":"b1d469493c8885192ee6eb45b168b5bdcf8974b9dae7065481ef42e8ba40dbb1"} Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.821356 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-0a0f-account-create-trpcp" podStartSLOduration=2.821320001 podStartE2EDuration="2.821320001s" podCreationTimestamp="2025-11-24 08:12:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:12:26.815822451 +0000 UTC m=+908.814771700" watchObservedRunningTime="2025-11-24 08:12:26.821320001 +0000 UTC m=+908.820269250" Nov 24 08:12:26 crc kubenswrapper[4691]: I1124 08:12:26.843557 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-q7ttd" podStartSLOduration=1.843532325 podStartE2EDuration="1.843532325s" 
podCreationTimestamp="2025-11-24 08:12:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:12:26.839817498 +0000 UTC m=+908.838766747" watchObservedRunningTime="2025-11-24 08:12:26.843532325 +0000 UTC m=+908.842481574" Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.541670 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb" Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.560501 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-5559c" podStartSLOduration=3.560441932 podStartE2EDuration="3.560441932s" podCreationTimestamp="2025-11-24 08:12:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:12:26.878910092 +0000 UTC m=+908.877859341" watchObservedRunningTime="2025-11-24 08:12:27.560441932 +0000 UTC m=+909.559391221" Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.605201 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nxrrf"] Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.605506 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf" podUID="282ca99b-ddc6-4450-8f8c-6a8e40144d35" containerName="dnsmasq-dns" containerID="cri-o://16dbcab89fb2cac2d066a474a4404a9591de7b02220ea4458a8485d8d875d763" gracePeriod=10 Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.842606 4691 generic.go:334] "Generic (PLEG): container finished" podID="282ca99b-ddc6-4450-8f8c-6a8e40144d35" containerID="16dbcab89fb2cac2d066a474a4404a9591de7b02220ea4458a8485d8d875d763" exitCode=0 Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.842729 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf" event={"ID":"282ca99b-ddc6-4450-8f8c-6a8e40144d35","Type":"ContainerDied","Data":"16dbcab89fb2cac2d066a474a4404a9591de7b02220ea4458a8485d8d875d763"} Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.853610 4691 generic.go:334] "Generic (PLEG): container finished" podID="6cae1b4e-964b-4f74-8ace-ec10292243fb" containerID="22da12dae1417be7880c317676aa0973d20bdd55a2ab69418c94a8cdc852fe92" exitCode=0 Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.853700 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-mjqw5" event={"ID":"6cae1b4e-964b-4f74-8ace-ec10292243fb","Type":"ContainerDied","Data":"22da12dae1417be7880c317676aa0973d20bdd55a2ab69418c94a8cdc852fe92"} Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.856178 4691 generic.go:334] "Generic (PLEG): container finished" podID="0532c6d6-54c8-4920-856d-049cbc33863f" containerID="5b6af859292ef981db8c91f78740d2480e285acb102629cbf0c98d1978f57809" exitCode=0 Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.856269 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0a0f-account-create-trpcp" event={"ID":"0532c6d6-54c8-4920-856d-049cbc33863f","Type":"ContainerDied","Data":"5b6af859292ef981db8c91f78740d2480e285acb102629cbf0c98d1978f57809"} Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.859331 4691 generic.go:334] "Generic (PLEG): container finished" podID="c0b5edb1-0fd7-4165-9644-30ec96e3bf88" containerID="ef6a16148c5d039194bbc38f645fc84819158a17e73be2ea9035029ff79c2bbd" exitCode=0 Nov 24 
08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.859396 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-q7ttd" event={"ID":"c0b5edb1-0fd7-4165-9644-30ec96e3bf88","Type":"ContainerDied","Data":"ef6a16148c5d039194bbc38f645fc84819158a17e73be2ea9035029ff79c2bbd"} Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.866361 4691 generic.go:334] "Generic (PLEG): container finished" podID="74a59460-c08c-46eb-97c1-07609f197dee" containerID="37b4cd83135c3b23844154305c718d0813b2d753b757b3109e43bc22a35f7468" exitCode=0 Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.866478 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-b277-account-create-bksjl" event={"ID":"74a59460-c08c-46eb-97c1-07609f197dee","Type":"ContainerDied","Data":"37b4cd83135c3b23844154305c718d0813b2d753b757b3109e43bc22a35f7468"} Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.908946 4691 generic.go:334] "Generic (PLEG): container finished" podID="070a2b47-984b-4039-b05b-1953eec94bad" containerID="30777a7f80a51c4a40862fa2242b4fe079d0b51f4ef1456fe6e0a758957b7044" exitCode=0 Nov 24 08:12:27 crc kubenswrapper[4691]: I1124 08:12:27.909341 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-5559c" event={"ID":"070a2b47-984b-4039-b05b-1953eec94bad","Type":"ContainerDied","Data":"30777a7f80a51c4a40862fa2242b4fe079d0b51f4ef1456fe6e0a758957b7044"} Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.309858 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.405126 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-config\") pod \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.405241 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-dns-svc\") pod \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.405373 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrftx\" (UniqueName: \"kubernetes.io/projected/282ca99b-ddc6-4450-8f8c-6a8e40144d35-kube-api-access-nrftx\") pod \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\" (UID: \"282ca99b-ddc6-4450-8f8c-6a8e40144d35\") " Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.411844 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/282ca99b-ddc6-4450-8f8c-6a8e40144d35-kube-api-access-nrftx" (OuterVolumeSpecName: "kube-api-access-nrftx") pod "282ca99b-ddc6-4450-8f8c-6a8e40144d35" (UID: "282ca99b-ddc6-4450-8f8c-6a8e40144d35"). InnerVolumeSpecName "kube-api-access-nrftx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.450977 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-config" (OuterVolumeSpecName: "config") pod "282ca99b-ddc6-4450-8f8c-6a8e40144d35" (UID: "282ca99b-ddc6-4450-8f8c-6a8e40144d35"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.453435 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "282ca99b-ddc6-4450-8f8c-6a8e40144d35" (UID: "282ca99b-ddc6-4450-8f8c-6a8e40144d35"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.507923 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.507957 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/282ca99b-ddc6-4450-8f8c-6a8e40144d35-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.507969 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrftx\" (UniqueName: \"kubernetes.io/projected/282ca99b-ddc6-4450-8f8c-6a8e40144d35-kube-api-access-nrftx\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.515530 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7539-account-create-wnbqm" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.609392 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8982bed2-9351-43d6-964d-85d5aa0003a7-operator-scripts\") pod \"8982bed2-9351-43d6-964d-85d5aa0003a7\" (UID: \"8982bed2-9351-43d6-964d-85d5aa0003a7\") " Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.609677 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjwpr\" (UniqueName: \"kubernetes.io/projected/8982bed2-9351-43d6-964d-85d5aa0003a7-kube-api-access-vjwpr\") pod \"8982bed2-9351-43d6-964d-85d5aa0003a7\" (UID: \"8982bed2-9351-43d6-964d-85d5aa0003a7\") " Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.610331 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8982bed2-9351-43d6-964d-85d5aa0003a7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8982bed2-9351-43d6-964d-85d5aa0003a7" (UID: "8982bed2-9351-43d6-964d-85d5aa0003a7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.613728 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8982bed2-9351-43d6-964d-85d5aa0003a7-kube-api-access-vjwpr" (OuterVolumeSpecName: "kube-api-access-vjwpr") pod "8982bed2-9351-43d6-964d-85d5aa0003a7" (UID: "8982bed2-9351-43d6-964d-85d5aa0003a7"). InnerVolumeSpecName "kube-api-access-vjwpr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.713834 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8982bed2-9351-43d6-964d-85d5aa0003a7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.713892 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjwpr\" (UniqueName: \"kubernetes.io/projected/8982bed2-9351-43d6-964d-85d5aa0003a7-kube-api-access-vjwpr\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.918022 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7539-account-create-wnbqm" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.918009 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7539-account-create-wnbqm" event={"ID":"8982bed2-9351-43d6-964d-85d5aa0003a7","Type":"ContainerDied","Data":"8897441dd08c68ddae19489bde69762e50cd4be71bddd3fd476f570575a1a497"} Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.918157 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8897441dd08c68ddae19489bde69762e50cd4be71bddd3fd476f570575a1a497" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.920607 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf" event={"ID":"282ca99b-ddc6-4450-8f8c-6a8e40144d35","Type":"ContainerDied","Data":"b81080fd17f87f63e352e5b930b1060e343e573863bd93d58b38171a3ad86931"} Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.920654 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nxrrf" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.920747 4691 scope.go:117] "RemoveContainer" containerID="16dbcab89fb2cac2d066a474a4404a9591de7b02220ea4458a8485d8d875d763" Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.979920 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nxrrf"] Nov 24 08:12:28 crc kubenswrapper[4691]: I1124 08:12:28.987089 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nxrrf"] Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.002440 4691 scope.go:117] "RemoveContainer" containerID="d89b820dc7e991a20d9a7b26e4cbda21da3e0b59b6c664adab6c93a68886e173" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.379074 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-b277-account-create-bksjl" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.431340 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74a59460-c08c-46eb-97c1-07609f197dee-operator-scripts\") pod \"74a59460-c08c-46eb-97c1-07609f197dee\" (UID: \"74a59460-c08c-46eb-97c1-07609f197dee\") " Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.431410 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnlz5\" (UniqueName: \"kubernetes.io/projected/74a59460-c08c-46eb-97c1-07609f197dee-kube-api-access-pnlz5\") pod \"74a59460-c08c-46eb-97c1-07609f197dee\" (UID: \"74a59460-c08c-46eb-97c1-07609f197dee\") " Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.434191 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74a59460-c08c-46eb-97c1-07609f197dee-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "74a59460-c08c-46eb-97c1-07609f197dee" (UID: "74a59460-c08c-46eb-97c1-07609f197dee"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.456158 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74a59460-c08c-46eb-97c1-07609f197dee-kube-api-access-pnlz5" (OuterVolumeSpecName: "kube-api-access-pnlz5") pod "74a59460-c08c-46eb-97c1-07609f197dee" (UID: "74a59460-c08c-46eb-97c1-07609f197dee"). InnerVolumeSpecName "kube-api-access-pnlz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.504927 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-mjqw5" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.510840 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0a0f-account-create-trpcp" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.517205 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-5559c" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.532876 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hf4zx\" (UniqueName: \"kubernetes.io/projected/6cae1b4e-964b-4f74-8ace-ec10292243fb-kube-api-access-hf4zx\") pod \"6cae1b4e-964b-4f74-8ace-ec10292243fb\" (UID: \"6cae1b4e-964b-4f74-8ace-ec10292243fb\") " Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.532974 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0532c6d6-54c8-4920-856d-049cbc33863f-operator-scripts\") pod \"0532c6d6-54c8-4920-856d-049cbc33863f\" (UID: \"0532c6d6-54c8-4920-856d-049cbc33863f\") " Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.533159 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8kwr8\" (UniqueName: \"kubernetes.io/projected/0532c6d6-54c8-4920-856d-049cbc33863f-kube-api-access-8kwr8\") pod \"0532c6d6-54c8-4920-856d-049cbc33863f\" (UID: \"0532c6d6-54c8-4920-856d-049cbc33863f\") " Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.533210 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cae1b4e-964b-4f74-8ace-ec10292243fb-operator-scripts\") pod \"6cae1b4e-964b-4f74-8ace-ec10292243fb\" (UID: \"6cae1b4e-964b-4f74-8ace-ec10292243fb\") " Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.533795 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74a59460-c08c-46eb-97c1-07609f197dee-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.533821 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnlz5\" (UniqueName: \"kubernetes.io/projected/74a59460-c08c-46eb-97c1-07609f197dee-kube-api-access-pnlz5\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.534475 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cae1b4e-964b-4f74-8ace-ec10292243fb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6cae1b4e-964b-4f74-8ace-ec10292243fb" (UID: "6cae1b4e-964b-4f74-8ace-ec10292243fb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.534777 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0532c6d6-54c8-4920-856d-049cbc33863f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0532c6d6-54c8-4920-856d-049cbc33863f" (UID: "0532c6d6-54c8-4920-856d-049cbc33863f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.544575 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cae1b4e-964b-4f74-8ace-ec10292243fb-kube-api-access-hf4zx" (OuterVolumeSpecName: "kube-api-access-hf4zx") pod "6cae1b4e-964b-4f74-8ace-ec10292243fb" (UID: "6cae1b4e-964b-4f74-8ace-ec10292243fb"). InnerVolumeSpecName "kube-api-access-hf4zx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.545785 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0532c6d6-54c8-4920-856d-049cbc33863f-kube-api-access-8kwr8" (OuterVolumeSpecName: "kube-api-access-8kwr8") pod "0532c6d6-54c8-4920-856d-049cbc33863f" (UID: "0532c6d6-54c8-4920-856d-049cbc33863f"). InnerVolumeSpecName "kube-api-access-8kwr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.550835 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-q7ttd" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.635746 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f59vx\" (UniqueName: \"kubernetes.io/projected/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-kube-api-access-f59vx\") pod \"c0b5edb1-0fd7-4165-9644-30ec96e3bf88\" (UID: \"c0b5edb1-0fd7-4165-9644-30ec96e3bf88\") " Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.635917 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snxtm\" (UniqueName: \"kubernetes.io/projected/070a2b47-984b-4039-b05b-1953eec94bad-kube-api-access-snxtm\") pod \"070a2b47-984b-4039-b05b-1953eec94bad\" (UID: \"070a2b47-984b-4039-b05b-1953eec94bad\") " Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.636153 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/070a2b47-984b-4039-b05b-1953eec94bad-operator-scripts\") pod \"070a2b47-984b-4039-b05b-1953eec94bad\" (UID: \"070a2b47-984b-4039-b05b-1953eec94bad\") " Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.636180 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-operator-scripts\") pod \"c0b5edb1-0fd7-4165-9644-30ec96e3bf88\" (UID: \"c0b5edb1-0fd7-4165-9644-30ec96e3bf88\") " Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.636665 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8kwr8\" (UniqueName: \"kubernetes.io/projected/0532c6d6-54c8-4920-856d-049cbc33863f-kube-api-access-8kwr8\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.636681 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cae1b4e-964b-4f74-8ace-ec10292243fb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.636692 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hf4zx\" (UniqueName: \"kubernetes.io/projected/6cae1b4e-964b-4f74-8ace-ec10292243fb-kube-api-access-hf4zx\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.636701 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0532c6d6-54c8-4920-856d-049cbc33863f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.636789 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod 
"c0b5edb1-0fd7-4165-9644-30ec96e3bf88" (UID: "c0b5edb1-0fd7-4165-9644-30ec96e3bf88"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.636890 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/070a2b47-984b-4039-b05b-1953eec94bad-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "070a2b47-984b-4039-b05b-1953eec94bad" (UID: "070a2b47-984b-4039-b05b-1953eec94bad"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.639714 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/070a2b47-984b-4039-b05b-1953eec94bad-kube-api-access-snxtm" (OuterVolumeSpecName: "kube-api-access-snxtm") pod "070a2b47-984b-4039-b05b-1953eec94bad" (UID: "070a2b47-984b-4039-b05b-1953eec94bad"). InnerVolumeSpecName "kube-api-access-snxtm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.640179 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-kube-api-access-f59vx" (OuterVolumeSpecName: "kube-api-access-f59vx") pod "c0b5edb1-0fd7-4165-9644-30ec96e3bf88" (UID: "c0b5edb1-0fd7-4165-9644-30ec96e3bf88"). InnerVolumeSpecName "kube-api-access-f59vx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.739032 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/070a2b47-984b-4039-b05b-1953eec94bad-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.739364 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.739378 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f59vx\" (UniqueName: \"kubernetes.io/projected/c0b5edb1-0fd7-4165-9644-30ec96e3bf88-kube-api-access-f59vx\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.739394 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snxtm\" (UniqueName: \"kubernetes.io/projected/070a2b47-984b-4039-b05b-1953eec94bad-kube-api-access-snxtm\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.932327 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-mjqw5" event={"ID":"6cae1b4e-964b-4f74-8ace-ec10292243fb","Type":"ContainerDied","Data":"a128cfa6553d6e9a18e35cb94297980144a9816db69efe1c367324aa8b60feac"} Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.933905 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a128cfa6553d6e9a18e35cb94297980144a9816db69efe1c367324aa8b60feac" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.932388 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-mjqw5" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.940535 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-0a0f-account-create-trpcp" event={"ID":"0532c6d6-54c8-4920-856d-049cbc33863f","Type":"ContainerDied","Data":"b6979e989f277330405d587d6a32e5d83ec12522e02dd01f56c17525bc7da6cb"} Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.940599 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6979e989f277330405d587d6a32e5d83ec12522e02dd01f56c17525bc7da6cb" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.940602 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-0a0f-account-create-trpcp" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.943110 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-q7ttd" event={"ID":"c0b5edb1-0fd7-4165-9644-30ec96e3bf88","Type":"ContainerDied","Data":"4310e66e62fa9d4fadb4e4846c30dd7e8509b2ef6d6443e4ef8288708e831ee1"} Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.943160 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4310e66e62fa9d4fadb4e4846c30dd7e8509b2ef6d6443e4ef8288708e831ee1" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.943265 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-q7ttd" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.946818 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-b277-account-create-bksjl" event={"ID":"74a59460-c08c-46eb-97c1-07609f197dee","Type":"ContainerDied","Data":"749a0fc940f937fb558c78b59411548c6d21be05f50f396dccfea1af365bba1c"} Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.946878 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="749a0fc940f937fb558c78b59411548c6d21be05f50f396dccfea1af365bba1c" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.946975 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-b277-account-create-bksjl" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.954261 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-5559c" Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.954355 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-5559c" event={"ID":"070a2b47-984b-4039-b05b-1953eec94bad","Type":"ContainerDied","Data":"b1d469493c8885192ee6eb45b168b5bdcf8974b9dae7065481ef42e8ba40dbb1"} Nov 24 08:12:29 crc kubenswrapper[4691]: I1124 08:12:29.954395 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1d469493c8885192ee6eb45b168b5bdcf8974b9dae7065481ef42e8ba40dbb1" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.454074 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.734223 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-2q47m"] Nov 24 08:12:30 crc kubenswrapper[4691]: E1124 08:12:30.734742 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0b5edb1-0fd7-4165-9644-30ec96e3bf88" containerName="mariadb-database-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.734960 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0b5edb1-0fd7-4165-9644-30ec96e3bf88" containerName="mariadb-database-create" Nov 24 08:12:30 crc kubenswrapper[4691]: E1124 08:12:30.734990 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74a59460-c08c-46eb-97c1-07609f197dee" containerName="mariadb-account-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735001 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="74a59460-c08c-46eb-97c1-07609f197dee" containerName="mariadb-account-create" Nov 24 08:12:30 crc kubenswrapper[4691]: E1124 08:12:30.735034 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cae1b4e-964b-4f74-8ace-ec10292243fb" containerName="mariadb-database-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735044 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cae1b4e-964b-4f74-8ace-ec10292243fb" containerName="mariadb-database-create" Nov 24 08:12:30 crc kubenswrapper[4691]: E1124 08:12:30.735062 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8982bed2-9351-43d6-964d-85d5aa0003a7" containerName="mariadb-account-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735073 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8982bed2-9351-43d6-964d-85d5aa0003a7" containerName="mariadb-account-create" Nov 24 08:12:30 crc kubenswrapper[4691]: E1124 08:12:30.735088 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="070a2b47-984b-4039-b05b-1953eec94bad" containerName="mariadb-database-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735097 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="070a2b47-984b-4039-b05b-1953eec94bad" containerName="mariadb-database-create" Nov 24 08:12:30 crc kubenswrapper[4691]: E1124 08:12:30.735111 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="282ca99b-ddc6-4450-8f8c-6a8e40144d35" containerName="init" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735119 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="282ca99b-ddc6-4450-8f8c-6a8e40144d35" containerName="init" Nov 24 08:12:30 crc kubenswrapper[4691]: E1124 08:12:30.735137 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="282ca99b-ddc6-4450-8f8c-6a8e40144d35" containerName="dnsmasq-dns" Nov 24 
08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735145 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="282ca99b-ddc6-4450-8f8c-6a8e40144d35" containerName="dnsmasq-dns" Nov 24 08:12:30 crc kubenswrapper[4691]: E1124 08:12:30.735156 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0532c6d6-54c8-4920-856d-049cbc33863f" containerName="mariadb-account-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735165 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="0532c6d6-54c8-4920-856d-049cbc33863f" containerName="mariadb-account-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735364 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cae1b4e-964b-4f74-8ace-ec10292243fb" containerName="mariadb-database-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735382 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="8982bed2-9351-43d6-964d-85d5aa0003a7" containerName="mariadb-account-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735400 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="070a2b47-984b-4039-b05b-1953eec94bad" containerName="mariadb-database-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735413 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="74a59460-c08c-46eb-97c1-07609f197dee" containerName="mariadb-account-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735425 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0b5edb1-0fd7-4165-9644-30ec96e3bf88" containerName="mariadb-database-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735439 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="282ca99b-ddc6-4450-8f8c-6a8e40144d35" containerName="dnsmasq-dns" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.735469 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="0532c6d6-54c8-4920-856d-049cbc33863f" containerName="mariadb-account-create" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.736215 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.739587 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.740633 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-jd5g4" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.760299 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6c7k5\" (UniqueName: \"kubernetes.io/projected/18752e43-a39c-4e17-bf83-831b8361d976-kube-api-access-6c7k5\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.760366 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-config-data\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.760409 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-combined-ca-bundle\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.760507 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-2q47m"] Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.760702 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-db-sync-config-data\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.773264 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="282ca99b-ddc6-4450-8f8c-6a8e40144d35" path="/var/lib/kubelet/pods/282ca99b-ddc6-4450-8f8c-6a8e40144d35/volumes" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.862761 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-db-sync-config-data\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.862918 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6c7k5\" (UniqueName: \"kubernetes.io/projected/18752e43-a39c-4e17-bf83-831b8361d976-kube-api-access-6c7k5\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.862968 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-config-data\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc 
kubenswrapper[4691]: I1124 08:12:30.862998 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-combined-ca-bundle\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.869771 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-config-data\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.870485 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-db-sync-config-data\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.874860 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-combined-ca-bundle\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.880859 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6c7k5\" (UniqueName: \"kubernetes.io/projected/18752e43-a39c-4e17-bf83-831b8361d976-kube-api-access-6c7k5\") pod \"glance-db-sync-2q47m\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.974290 4691 generic.go:334] "Generic (PLEG): container finished" podID="5c00da32-542e-45b4-837c-67fa08ff49d3" containerID="a3bb6d716454d0a724012f299c808063ff75302b009b5828762dc546340d1e43" exitCode=0 Nov 24 08:12:30 crc kubenswrapper[4691]: I1124 08:12:30.974357 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-tzjh7" event={"ID":"5c00da32-542e-45b4-837c-67fa08ff49d3","Type":"ContainerDied","Data":"a3bb6d716454d0a724012f299c808063ff75302b009b5828762dc546340d1e43"} Nov 24 08:12:31 crc kubenswrapper[4691]: I1124 08:12:31.095404 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:31 crc kubenswrapper[4691]: I1124 08:12:31.695538 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-2q47m"] Nov 24 08:12:31 crc kubenswrapper[4691]: W1124 08:12:31.703653 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18752e43_a39c_4e17_bf83_831b8361d976.slice/crio-fe95f05f8526e1a59756257d52e4501060efb7668b5ecb73c8eb5de2f107abb8 WatchSource:0}: Error finding container fe95f05f8526e1a59756257d52e4501060efb7668b5ecb73c8eb5de2f107abb8: Status 404 returned error can't find the container with id fe95f05f8526e1a59756257d52e4501060efb7668b5ecb73c8eb5de2f107abb8 Nov 24 08:12:31 crc kubenswrapper[4691]: I1124 08:12:31.985437 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-2q47m" event={"ID":"18752e43-a39c-4e17-bf83-831b8361d976","Type":"ContainerStarted","Data":"fe95f05f8526e1a59756257d52e4501060efb7668b5ecb73c8eb5de2f107abb8"} Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.275861 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.304856 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-scripts\") pod \"5c00da32-542e-45b4-837c-67fa08ff49d3\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.304927 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-swiftconf\") pod \"5c00da32-542e-45b4-837c-67fa08ff49d3\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.304967 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdggw\" (UniqueName: \"kubernetes.io/projected/5c00da32-542e-45b4-837c-67fa08ff49d3-kube-api-access-mdggw\") pod \"5c00da32-542e-45b4-837c-67fa08ff49d3\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.305072 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-ring-data-devices\") pod \"5c00da32-542e-45b4-837c-67fa08ff49d3\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.305204 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-dispersionconf\") pod \"5c00da32-542e-45b4-837c-67fa08ff49d3\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.305230 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5c00da32-542e-45b4-837c-67fa08ff49d3-etc-swift\") pod \"5c00da32-542e-45b4-837c-67fa08ff49d3\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.305300 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-combined-ca-bundle\") pod \"5c00da32-542e-45b4-837c-67fa08ff49d3\" (UID: \"5c00da32-542e-45b4-837c-67fa08ff49d3\") " Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.316002 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "5c00da32-542e-45b4-837c-67fa08ff49d3" (UID: "5c00da32-542e-45b4-837c-67fa08ff49d3"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.321830 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c00da32-542e-45b4-837c-67fa08ff49d3-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "5c00da32-542e-45b4-837c-67fa08ff49d3" (UID: "5c00da32-542e-45b4-837c-67fa08ff49d3"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.327288 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c00da32-542e-45b4-837c-67fa08ff49d3-kube-api-access-mdggw" (OuterVolumeSpecName: "kube-api-access-mdggw") pod "5c00da32-542e-45b4-837c-67fa08ff49d3" (UID: "5c00da32-542e-45b4-837c-67fa08ff49d3"). InnerVolumeSpecName "kube-api-access-mdggw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.335410 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "5c00da32-542e-45b4-837c-67fa08ff49d3" (UID: "5c00da32-542e-45b4-837c-67fa08ff49d3"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.336064 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "5c00da32-542e-45b4-837c-67fa08ff49d3" (UID: "5c00da32-542e-45b4-837c-67fa08ff49d3"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.338009 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5c00da32-542e-45b4-837c-67fa08ff49d3" (UID: "5c00da32-542e-45b4-837c-67fa08ff49d3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.341236 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-scripts" (OuterVolumeSpecName: "scripts") pod "5c00da32-542e-45b4-837c-67fa08ff49d3" (UID: "5c00da32-542e-45b4-837c-67fa08ff49d3"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.408271 4691 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.408313 4691 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.408323 4691 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5c00da32-542e-45b4-837c-67fa08ff49d3-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.408332 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.408343 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5c00da32-542e-45b4-837c-67fa08ff49d3-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.408354 4691 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5c00da32-542e-45b4-837c-67fa08ff49d3-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:33 crc kubenswrapper[4691]: I1124 08:12:33.408366 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdggw\" (UniqueName: \"kubernetes.io/projected/5c00da32-542e-45b4-837c-67fa08ff49d3-kube-api-access-mdggw\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:34 crc kubenswrapper[4691]: I1124 08:12:34.004814 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-tzjh7" event={"ID":"5c00da32-542e-45b4-837c-67fa08ff49d3","Type":"ContainerDied","Data":"2d4b6734c5c581e64bca9c352d4003a1557fae7e56042af37b64277e2a37fce1"} Nov 24 08:12:34 crc kubenswrapper[4691]: I1124 08:12:34.004869 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-tzjh7" Nov 24 08:12:34 crc kubenswrapper[4691]: I1124 08:12:34.004890 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d4b6734c5c581e64bca9c352d4003a1557fae7e56042af37b64277e2a37fce1" Nov 24 08:12:34 crc kubenswrapper[4691]: I1124 08:12:34.223144 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0" Nov 24 08:12:34 crc kubenswrapper[4691]: I1124 08:12:34.243289 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/94ab9159-218c-42b9-9c38-8e0701f3eeef-etc-swift\") pod \"swift-storage-0\" (UID: \"94ab9159-218c-42b9-9c38-8e0701f3eeef\") " pod="openstack/swift-storage-0" Nov 24 08:12:34 crc kubenswrapper[4691]: I1124 08:12:34.277356 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 24 08:12:34 crc kubenswrapper[4691]: W1124 08:12:34.936290 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod94ab9159_218c_42b9_9c38_8e0701f3eeef.slice/crio-ada0d9403bb1aafd6a83416a22918fa92f0ac9aaa240fa58af062979bb768045 WatchSource:0}: Error finding container ada0d9403bb1aafd6a83416a22918fa92f0ac9aaa240fa58af062979bb768045: Status 404 returned error can't find the container with id ada0d9403bb1aafd6a83416a22918fa92f0ac9aaa240fa58af062979bb768045 Nov 24 08:12:34 crc kubenswrapper[4691]: I1124 08:12:34.937368 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 24 08:12:35 crc kubenswrapper[4691]: I1124 08:12:35.013664 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"ada0d9403bb1aafd6a83416a22918fa92f0ac9aaa240fa58af062979bb768045"} Nov 24 08:12:37 crc kubenswrapper[4691]: I1124 08:12:37.036609 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"54fd41ff753ab6df7e77ca159477eeef9c80d69f77e290cee4a6b83d54c86cdc"} Nov 24 08:12:37 crc kubenswrapper[4691]: I1124 08:12:37.037296 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"e8f355c4951c8e27f42230ee1428cfeaa83391b923c65d82eb57effe5ec37201"} Nov 24 08:12:37 crc kubenswrapper[4691]: I1124 08:12:37.037313 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"358055e948b6df9819d0d44cacfaed38870e2d78333dee485e467960b085a8f6"} Nov 24 08:12:38 crc kubenswrapper[4691]: I1124 08:12:38.047755 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"8cbad2b0e428ef1a2d3ec39105d22896150198e3a4671d0925c852962fac8d45"} Nov 24 08:12:39 crc kubenswrapper[4691]: I1124 08:12:39.061584 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"3682e4d3232398b76dff23191905bd0c163929e661038a328cdcca017f7d26c0"} Nov 24 08:12:39 crc kubenswrapper[4691]: I1124 08:12:39.061920 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"1bf257038f37304d82e7c7bd84092c90fde9190e1045d876831211527636bf49"} Nov 24 08:12:39 crc kubenswrapper[4691]: I1124 08:12:39.061931 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"dacdd421718ad3a1577b3635f2699c8d5dcd82d31d72a614882f89c26303a0ea"} Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.075011 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-jknmq" podUID="204a8833-cf7b-491a-b06a-0c983a6aa30a" containerName="ovn-controller" probeResult="failure" output=< Nov 24 08:12:41 crc kubenswrapper[4691]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' 
status Nov 24 08:12:41 crc kubenswrapper[4691]: > Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.083926 4691 generic.go:334] "Generic (PLEG): container finished" podID="60038211-87c8-4170-8fd0-35df8a16aa92" containerID="24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785" exitCode=0 Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.084000 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"60038211-87c8-4170-8fd0-35df8a16aa92","Type":"ContainerDied","Data":"24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785"} Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.091551 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"0995526f48828624a70a3307eb22b3bf76cdf309eb919511acfa9efd0b4b8d99"} Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.093541 4691 generic.go:334] "Generic (PLEG): container finished" podID="224d72d8-5d0a-48df-8930-2cb28fc1fd93" containerID="b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283" exitCode=0 Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.093573 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"224d72d8-5d0a-48df-8930-2cb28fc1fd93","Type":"ContainerDied","Data":"b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283"} Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.142404 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.145235 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-pkx2n" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.491816 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-jknmq-config-hgbbz"] Nov 24 08:12:41 crc kubenswrapper[4691]: E1124 08:12:41.492215 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c00da32-542e-45b4-837c-67fa08ff49d3" containerName="swift-ring-rebalance" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.492231 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c00da32-542e-45b4-837c-67fa08ff49d3" containerName="swift-ring-rebalance" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.492375 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c00da32-542e-45b4-837c-67fa08ff49d3" containerName="swift-ring-rebalance" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.492975 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.495648 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.507698 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jknmq-config-hgbbz"] Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.669122 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-scripts\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.669170 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.669191 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-additional-scripts\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.669276 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-log-ovn\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.669314 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmv6z\" (UniqueName: \"kubernetes.io/projected/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-kube-api-access-vmv6z\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.669369 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run-ovn\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.771108 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-log-ovn\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.771178 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmv6z\" (UniqueName: 
\"kubernetes.io/projected/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-kube-api-access-vmv6z\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.771209 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run-ovn\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.771306 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-scripts\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.771328 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.771357 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-additional-scripts\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.772505 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-log-ovn\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.772881 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run-ovn\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.772140 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-additional-scripts\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.774563 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.775229 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-scripts\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.793423 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmv6z\" (UniqueName: \"kubernetes.io/projected/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-kube-api-access-vmv6z\") pod \"ovn-controller-jknmq-config-hgbbz\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:41 crc kubenswrapper[4691]: I1124 08:12:41.831830 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:46 crc kubenswrapper[4691]: I1124 08:12:46.078368 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-jknmq" podUID="204a8833-cf7b-491a-b06a-0c983a6aa30a" containerName="ovn-controller" probeResult="failure" output=< Nov 24 08:12:46 crc kubenswrapper[4691]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 24 08:12:46 crc kubenswrapper[4691]: > Nov 24 08:12:47 crc kubenswrapper[4691]: I1124 08:12:47.158702 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"60038211-87c8-4170-8fd0-35df8a16aa92","Type":"ContainerStarted","Data":"5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f"} Nov 24 08:12:47 crc kubenswrapper[4691]: I1124 08:12:47.159704 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:12:47 crc kubenswrapper[4691]: I1124 08:12:47.163374 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"224d72d8-5d0a-48df-8930-2cb28fc1fd93","Type":"ContainerStarted","Data":"cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a"} Nov 24 08:12:47 crc kubenswrapper[4691]: I1124 08:12:47.163653 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 24 08:12:47 crc kubenswrapper[4691]: I1124 08:12:47.188873 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=53.90571326 podStartE2EDuration="1m6.188845666s" podCreationTimestamp="2025-11-24 08:11:41 +0000 UTC" firstStartedPulling="2025-11-24 08:11:53.630551314 +0000 UTC m=+875.629500563" lastFinishedPulling="2025-11-24 08:12:05.91368372 +0000 UTC m=+887.912632969" observedRunningTime="2025-11-24 08:12:47.182706858 +0000 UTC m=+929.181656117" watchObservedRunningTime="2025-11-24 08:12:47.188845666 +0000 UTC m=+929.187794915" Nov 24 08:12:47 crc kubenswrapper[4691]: I1124 08:12:47.221035 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=57.329148954 podStartE2EDuration="1m7.22101435s" podCreationTimestamp="2025-11-24 08:11:40 +0000 UTC" firstStartedPulling="2025-11-24 08:11:55.838979928 +0000 UTC m=+877.837929177" lastFinishedPulling="2025-11-24 08:12:05.730845314 +0000 UTC m=+887.729794573" observedRunningTime="2025-11-24 08:12:47.215547451 +0000 UTC m=+929.214496700" watchObservedRunningTime="2025-11-24 08:12:47.22101435 +0000 UTC m=+929.219963599" Nov 24 08:12:47 crc kubenswrapper[4691]: I1124 08:12:47.229557 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/ovn-controller-jknmq-config-hgbbz"] Nov 24 08:12:47 crc kubenswrapper[4691]: W1124 08:12:47.369252 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e3ba982_bc5e_4f2a_adad_6d7961c87b52.slice/crio-97a996eee4ce84b3322c144f420db0ca02acaa79df3ecf59b46d5220f9184404 WatchSource:0}: Error finding container 97a996eee4ce84b3322c144f420db0ca02acaa79df3ecf59b46d5220f9184404: Status 404 returned error can't find the container with id 97a996eee4ce84b3322c144f420db0ca02acaa79df3ecf59b46d5220f9184404 Nov 24 08:12:48 crc kubenswrapper[4691]: I1124 08:12:48.175874 4691 generic.go:334] "Generic (PLEG): container finished" podID="2e3ba982-bc5e-4f2a-adad-6d7961c87b52" containerID="cf4cee1eb31a43ad3f71bc69a62e364c38c702de3457ba5dde1baddd7638adf8" exitCode=0 Nov 24 08:12:48 crc kubenswrapper[4691]: I1124 08:12:48.175920 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jknmq-config-hgbbz" event={"ID":"2e3ba982-bc5e-4f2a-adad-6d7961c87b52","Type":"ContainerDied","Data":"cf4cee1eb31a43ad3f71bc69a62e364c38c702de3457ba5dde1baddd7638adf8"} Nov 24 08:12:48 crc kubenswrapper[4691]: I1124 08:12:48.176434 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jknmq-config-hgbbz" event={"ID":"2e3ba982-bc5e-4f2a-adad-6d7961c87b52","Type":"ContainerStarted","Data":"97a996eee4ce84b3322c144f420db0ca02acaa79df3ecf59b46d5220f9184404"} Nov 24 08:12:48 crc kubenswrapper[4691]: I1124 08:12:48.181272 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-2q47m" event={"ID":"18752e43-a39c-4e17-bf83-831b8361d976","Type":"ContainerStarted","Data":"229545ef18ae187001be86fc5cccd8cf4173dd8de0e2605703b7161d4581c422"} Nov 24 08:12:48 crc kubenswrapper[4691]: I1124 08:12:48.199817 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"425b59e016fb55689efcd4f99b41749f43333a0e1782fdba55ec510a1dc22408"} Nov 24 08:12:48 crc kubenswrapper[4691]: I1124 08:12:48.199858 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"796b583992c32fe0df282a1d8dfb1cd0acaddfba6af497a7711212c8f5f54ac0"} Nov 24 08:12:48 crc kubenswrapper[4691]: I1124 08:12:48.199869 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"d33c35258b1d837181ea529cb9ac05981cbd2858de62ff4bca7ffa19ab848749"} Nov 24 08:12:48 crc kubenswrapper[4691]: I1124 08:12:48.225559 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-2q47m" podStartSLOduration=3.104534314 podStartE2EDuration="18.225539102s" podCreationTimestamp="2025-11-24 08:12:30 +0000 UTC" firstStartedPulling="2025-11-24 08:12:31.705559874 +0000 UTC m=+913.704509123" lastFinishedPulling="2025-11-24 08:12:46.826564662 +0000 UTC m=+928.825513911" observedRunningTime="2025-11-24 08:12:48.220790895 +0000 UTC m=+930.219740144" watchObservedRunningTime="2025-11-24 08:12:48.225539102 +0000 UTC m=+930.224488351" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.219852 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"664f90e30f9c8e259ba831712bfb6aaa4dd4b1019f565001a5c6c824fb2b1d28"} Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.220216 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"5d62308ff514f37191faaa898bd409dd09c8f812ba56ed878983befa3094eda8"} Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.220233 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"4076926b1a37b21776beef743088d2bde156305687d40e9d0e708fa6edd61583"} Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.220247 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"94ab9159-218c-42b9-9c38-8e0701f3eeef","Type":"ContainerStarted","Data":"f3e40d4a16613c39c1145cd71c8fc287796f21290294c682d362eb50d4bcc90c"} Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.556002 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=20.059120985 podStartE2EDuration="32.555978865s" podCreationTimestamp="2025-11-24 08:12:17 +0000 UTC" firstStartedPulling="2025-11-24 08:12:34.938909633 +0000 UTC m=+916.937858882" lastFinishedPulling="2025-11-24 08:12:47.435767513 +0000 UTC m=+929.434716762" observedRunningTime="2025-11-24 08:12:49.257907135 +0000 UTC m=+931.256856404" watchObservedRunningTime="2025-11-24 08:12:49.555978865 +0000 UTC m=+931.554928124" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.563371 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-bqhqp"] Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.564941 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.571035 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.580410 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.591483 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-bqhqp"] Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.616807 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmv6z\" (UniqueName: \"kubernetes.io/projected/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-kube-api-access-vmv6z\") pod \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.620577 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-additional-scripts\") pod \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.620785 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run-ovn\") pod \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.620831 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run\") pod \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.620904 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-scripts\") pod \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.620947 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-log-ovn\") pod \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\" (UID: \"2e3ba982-bc5e-4f2a-adad-6d7961c87b52\") " Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.621680 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.621782 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.621918 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " 
pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.621958 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-config\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.622061 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.622132 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvtrb\" (UniqueName: \"kubernetes.io/projected/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-kube-api-access-wvtrb\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.623023 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "2e3ba982-bc5e-4f2a-adad-6d7961c87b52" (UID: "2e3ba982-bc5e-4f2a-adad-6d7961c87b52"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.623073 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "2e3ba982-bc5e-4f2a-adad-6d7961c87b52" (UID: "2e3ba982-bc5e-4f2a-adad-6d7961c87b52"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.623100 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run" (OuterVolumeSpecName: "var-run") pod "2e3ba982-bc5e-4f2a-adad-6d7961c87b52" (UID: "2e3ba982-bc5e-4f2a-adad-6d7961c87b52"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.623998 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-scripts" (OuterVolumeSpecName: "scripts") pod "2e3ba982-bc5e-4f2a-adad-6d7961c87b52" (UID: "2e3ba982-bc5e-4f2a-adad-6d7961c87b52"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.624044 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "2e3ba982-bc5e-4f2a-adad-6d7961c87b52" (UID: "2e3ba982-bc5e-4f2a-adad-6d7961c87b52"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.631404 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-kube-api-access-vmv6z" (OuterVolumeSpecName: "kube-api-access-vmv6z") pod "2e3ba982-bc5e-4f2a-adad-6d7961c87b52" (UID: "2e3ba982-bc5e-4f2a-adad-6d7961c87b52"). InnerVolumeSpecName "kube-api-access-vmv6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.723907 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.723978 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvtrb\" (UniqueName: \"kubernetes.io/projected/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-kube-api-access-wvtrb\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.724045 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.724075 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.724118 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.724137 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-config\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.724198 4691 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.724211 4691 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-run\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.724232 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.724245 4691 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.724257 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmv6z\" (UniqueName: \"kubernetes.io/projected/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-kube-api-access-vmv6z\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.724271 4691 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/2e3ba982-bc5e-4f2a-adad-6d7961c87b52-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.725234 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.725234 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.725354 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-config\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.725903 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.726061 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.742639 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvtrb\" (UniqueName: \"kubernetes.io/projected/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-kube-api-access-wvtrb\") pod \"dnsmasq-dns-5c79d794d7-bqhqp\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:49 crc kubenswrapper[4691]: I1124 08:12:49.894566 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.228548 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jknmq-config-hgbbz" Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.229610 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jknmq-config-hgbbz" event={"ID":"2e3ba982-bc5e-4f2a-adad-6d7961c87b52","Type":"ContainerDied","Data":"97a996eee4ce84b3322c144f420db0ca02acaa79df3ecf59b46d5220f9184404"} Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.229668 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97a996eee4ce84b3322c144f420db0ca02acaa79df3ecf59b46d5220f9184404" Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.357528 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-bqhqp"] Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.727215 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-jknmq-config-hgbbz"] Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.734627 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-jknmq-config-hgbbz"] Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.773723 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e3ba982-bc5e-4f2a-adad-6d7961c87b52" path="/var/lib/kubelet/pods/2e3ba982-bc5e-4f2a-adad-6d7961c87b52/volumes" Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.943937 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-jknmq-config-l6jtt"] Nov 24 08:12:50 crc kubenswrapper[4691]: E1124 08:12:50.944294 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e3ba982-bc5e-4f2a-adad-6d7961c87b52" containerName="ovn-config" Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.944307 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e3ba982-bc5e-4f2a-adad-6d7961c87b52" containerName="ovn-config" Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.944488 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e3ba982-bc5e-4f2a-adad-6d7961c87b52" containerName="ovn-config" Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.945054 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.948192 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 24 08:12:50 crc kubenswrapper[4691]: I1124 08:12:50.959725 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jknmq-config-l6jtt"] Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.047385 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-additional-scripts\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.047507 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-scripts\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.047550 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5lgp\" (UniqueName: \"kubernetes.io/projected/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-kube-api-access-m5lgp\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.047676 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-log-ovn\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.047723 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.047738 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run-ovn\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.089770 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.089838 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.103916 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-jknmq" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.149376 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-log-ovn\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.149492 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.149533 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run-ovn\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.149557 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-additional-scripts\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.149619 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-scripts\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.149669 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5lgp\" (UniqueName: \"kubernetes.io/projected/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-kube-api-access-m5lgp\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.149849 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run-ovn\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.149849 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.149855 4691 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-log-ovn\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.150935 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-additional-scripts\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.153098 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-scripts\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.174036 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5lgp\" (UniqueName: \"kubernetes.io/projected/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-kube-api-access-m5lgp\") pod \"ovn-controller-jknmq-config-l6jtt\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.238840 4691 generic.go:334] "Generic (PLEG): container finished" podID="bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" containerID="c72600fcf0d69b8e2c0ef7c27e906684d5f44203f7f2d47b70884a9489b83968" exitCode=0 Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.238898 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" event={"ID":"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e","Type":"ContainerDied","Data":"c72600fcf0d69b8e2c0ef7c27e906684d5f44203f7f2d47b70884a9489b83968"} Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.238924 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" event={"ID":"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e","Type":"ContainerStarted","Data":"00b89c3a5e723dafd9fa3730384a23a399c9c827a009c238c4da3f5f84010547"} Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.287009 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:51 crc kubenswrapper[4691]: W1124 08:12:51.817122 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode16c2aa0_48e9_49ea_ba59_e08fede9f48d.slice/crio-9b8924d4444744bd9f11309e02e179d679b68d5b500016e311c705b2ec0b4475 WatchSource:0}: Error finding container 9b8924d4444744bd9f11309e02e179d679b68d5b500016e311c705b2ec0b4475: Status 404 returned error can't find the container with id 9b8924d4444744bd9f11309e02e179d679b68d5b500016e311c705b2ec0b4475 Nov 24 08:12:51 crc kubenswrapper[4691]: I1124 08:12:51.817592 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-jknmq-config-l6jtt"] Nov 24 08:12:52 crc kubenswrapper[4691]: I1124 08:12:52.252438 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" event={"ID":"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e","Type":"ContainerStarted","Data":"f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64"} Nov 24 08:12:52 crc kubenswrapper[4691]: I1124 08:12:52.252838 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:52 crc kubenswrapper[4691]: I1124 08:12:52.254874 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jknmq-config-l6jtt" event={"ID":"e16c2aa0-48e9-49ea-ba59-e08fede9f48d","Type":"ContainerStarted","Data":"ff17ebc5cb1c438b7cad9cf02c3461cbc5711563a201dba53ed7b11d80200ef4"} Nov 24 08:12:52 crc kubenswrapper[4691]: I1124 08:12:52.254943 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-jknmq-config-l6jtt" event={"ID":"e16c2aa0-48e9-49ea-ba59-e08fede9f48d","Type":"ContainerStarted","Data":"9b8924d4444744bd9f11309e02e179d679b68d5b500016e311c705b2ec0b4475"} Nov 24 08:12:52 crc kubenswrapper[4691]: I1124 08:12:52.282782 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" podStartSLOduration=3.282742743 podStartE2EDuration="3.282742743s" podCreationTimestamp="2025-11-24 08:12:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:12:52.278551881 +0000 UTC m=+934.277501140" watchObservedRunningTime="2025-11-24 08:12:52.282742743 +0000 UTC m=+934.281692002" Nov 24 08:12:52 crc kubenswrapper[4691]: E1124 08:12:52.527563 4691 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode16c2aa0_48e9_49ea_ba59_e08fede9f48d.slice/crio-ff17ebc5cb1c438b7cad9cf02c3461cbc5711563a201dba53ed7b11d80200ef4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode16c2aa0_48e9_49ea_ba59_e08fede9f48d.slice/crio-conmon-ff17ebc5cb1c438b7cad9cf02c3461cbc5711563a201dba53ed7b11d80200ef4.scope\": RecentStats: unable to find data in memory cache]" Nov 24 08:12:53 crc kubenswrapper[4691]: I1124 08:12:53.264751 4691 generic.go:334] "Generic (PLEG): container finished" podID="e16c2aa0-48e9-49ea-ba59-e08fede9f48d" containerID="ff17ebc5cb1c438b7cad9cf02c3461cbc5711563a201dba53ed7b11d80200ef4" exitCode=0 Nov 24 08:12:53 crc kubenswrapper[4691]: I1124 08:12:53.264858 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-jknmq-config-l6jtt" event={"ID":"e16c2aa0-48e9-49ea-ba59-e08fede9f48d","Type":"ContainerDied","Data":"ff17ebc5cb1c438b7cad9cf02c3461cbc5711563a201dba53ed7b11d80200ef4"} Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.733373 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.847392 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5lgp\" (UniqueName: \"kubernetes.io/projected/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-kube-api-access-m5lgp\") pod \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.847698 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run-ovn\") pod \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.847815 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e16c2aa0-48e9-49ea-ba59-e08fede9f48d" (UID: "e16c2aa0-48e9-49ea-ba59-e08fede9f48d"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.847908 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-additional-scripts\") pod \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.848668 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "e16c2aa0-48e9-49ea-ba59-e08fede9f48d" (UID: "e16c2aa0-48e9-49ea-ba59-e08fede9f48d"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.848775 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-log-ovn\") pod \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.848913 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-scripts\") pod \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.848827 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e16c2aa0-48e9-49ea-ba59-e08fede9f48d" (UID: "e16c2aa0-48e9-49ea-ba59-e08fede9f48d"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.850013 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-scripts" (OuterVolumeSpecName: "scripts") pod "e16c2aa0-48e9-49ea-ba59-e08fede9f48d" (UID: "e16c2aa0-48e9-49ea-ba59-e08fede9f48d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.850151 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run" (OuterVolumeSpecName: "var-run") pod "e16c2aa0-48e9-49ea-ba59-e08fede9f48d" (UID: "e16c2aa0-48e9-49ea-ba59-e08fede9f48d"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.850178 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run\") pod \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\" (UID: \"e16c2aa0-48e9-49ea-ba59-e08fede9f48d\") " Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.851055 4691 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.851078 4691 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.851092 4691 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.851103 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.856900 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-kube-api-access-m5lgp" (OuterVolumeSpecName: "kube-api-access-m5lgp") pod "e16c2aa0-48e9-49ea-ba59-e08fede9f48d" (UID: "e16c2aa0-48e9-49ea-ba59-e08fede9f48d"). InnerVolumeSpecName "kube-api-access-m5lgp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.902097 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-jknmq-config-l6jtt"] Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.909168 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-jknmq-config-l6jtt"] Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.952875 4691 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-var-run\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:54 crc kubenswrapper[4691]: I1124 08:12:54.952914 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5lgp\" (UniqueName: \"kubernetes.io/projected/e16c2aa0-48e9-49ea-ba59-e08fede9f48d-kube-api-access-m5lgp\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:55 crc kubenswrapper[4691]: I1124 08:12:55.617415 4691 generic.go:334] "Generic (PLEG): container finished" podID="18752e43-a39c-4e17-bf83-831b8361d976" containerID="229545ef18ae187001be86fc5cccd8cf4173dd8de0e2605703b7161d4581c422" exitCode=0 Nov 24 08:12:55 crc kubenswrapper[4691]: I1124 08:12:55.617805 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-2q47m" event={"ID":"18752e43-a39c-4e17-bf83-831b8361d976","Type":"ContainerDied","Data":"229545ef18ae187001be86fc5cccd8cf4173dd8de0e2605703b7161d4581c422"} Nov 24 08:12:55 crc kubenswrapper[4691]: I1124 08:12:55.620914 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b8924d4444744bd9f11309e02e179d679b68d5b500016e311c705b2ec0b4475" Nov 24 08:12:55 crc kubenswrapper[4691]: I1124 08:12:55.621005 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-jknmq-config-l6jtt" Nov 24 08:12:56 crc kubenswrapper[4691]: I1124 08:12:56.777658 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e16c2aa0-48e9-49ea-ba59-e08fede9f48d" path="/var/lib/kubelet/pods/e16c2aa0-48e9-49ea-ba59-e08fede9f48d/volumes" Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.046662 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.135290 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-config-data\") pod \"18752e43-a39c-4e17-bf83-831b8361d976\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.135417 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6c7k5\" (UniqueName: \"kubernetes.io/projected/18752e43-a39c-4e17-bf83-831b8361d976-kube-api-access-6c7k5\") pod \"18752e43-a39c-4e17-bf83-831b8361d976\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.135502 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-combined-ca-bundle\") pod \"18752e43-a39c-4e17-bf83-831b8361d976\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.135558 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-db-sync-config-data\") pod \"18752e43-a39c-4e17-bf83-831b8361d976\" (UID: \"18752e43-a39c-4e17-bf83-831b8361d976\") " Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.148724 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18752e43-a39c-4e17-bf83-831b8361d976-kube-api-access-6c7k5" (OuterVolumeSpecName: "kube-api-access-6c7k5") pod "18752e43-a39c-4e17-bf83-831b8361d976" (UID: "18752e43-a39c-4e17-bf83-831b8361d976"). InnerVolumeSpecName "kube-api-access-6c7k5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.156157 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "18752e43-a39c-4e17-bf83-831b8361d976" (UID: "18752e43-a39c-4e17-bf83-831b8361d976"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.162639 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "18752e43-a39c-4e17-bf83-831b8361d976" (UID: "18752e43-a39c-4e17-bf83-831b8361d976"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.180050 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-config-data" (OuterVolumeSpecName: "config-data") pod "18752e43-a39c-4e17-bf83-831b8361d976" (UID: "18752e43-a39c-4e17-bf83-831b8361d976"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.238290 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6c7k5\" (UniqueName: \"kubernetes.io/projected/18752e43-a39c-4e17-bf83-831b8361d976-kube-api-access-6c7k5\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.238336 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.238349 4691 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.238360 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18752e43-a39c-4e17-bf83-831b8361d976-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.640771 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-2q47m" event={"ID":"18752e43-a39c-4e17-bf83-831b8361d976","Type":"ContainerDied","Data":"fe95f05f8526e1a59756257d52e4501060efb7668b5ecb73c8eb5de2f107abb8"} Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.641274 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe95f05f8526e1a59756257d52e4501060efb7668b5ecb73c8eb5de2f107abb8" Nov 24 08:12:57 crc kubenswrapper[4691]: I1124 08:12:57.640886 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-2q47m" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.121906 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-bqhqp"] Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.122244 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" podUID="bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" containerName="dnsmasq-dns" containerID="cri-o://f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64" gracePeriod=10 Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.126816 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.165838 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-2vnf5"] Nov 24 08:12:58 crc kubenswrapper[4691]: E1124 08:12:58.166355 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e16c2aa0-48e9-49ea-ba59-e08fede9f48d" containerName="ovn-config" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.166374 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="e16c2aa0-48e9-49ea-ba59-e08fede9f48d" containerName="ovn-config" Nov 24 08:12:58 crc kubenswrapper[4691]: E1124 08:12:58.166384 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18752e43-a39c-4e17-bf83-831b8361d976" containerName="glance-db-sync" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.166394 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="18752e43-a39c-4e17-bf83-831b8361d976" containerName="glance-db-sync" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.166631 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="18752e43-a39c-4e17-bf83-831b8361d976" containerName="glance-db-sync" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.166661 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="e16c2aa0-48e9-49ea-ba59-e08fede9f48d" containerName="ovn-config" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.168438 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.195888 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-2vnf5"] Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.255927 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.255999 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.256051 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.256095 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bknn9\" (UniqueName: \"kubernetes.io/projected/e4184c5f-6573-4950-8920-d0b3d7aa2989-kube-api-access-bknn9\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.256131 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-config\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.256156 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.357543 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bknn9\" (UniqueName: \"kubernetes.io/projected/e4184c5f-6573-4950-8920-d0b3d7aa2989-kube-api-access-bknn9\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.357598 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-config\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.357622 4691 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.357653 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.357699 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.357743 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.358916 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-config\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.359413 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-svc\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.359433 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-swift-storage-0\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.363160 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-sb\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.363277 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-nb\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.383634 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bknn9\" (UniqueName: 
\"kubernetes.io/projected/e4184c5f-6573-4950-8920-d0b3d7aa2989-kube-api-access-bknn9\") pod \"dnsmasq-dns-5f59b8f679-2vnf5\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") " pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.602253 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.618118 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.658420 4691 generic.go:334] "Generic (PLEG): container finished" podID="bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" containerID="f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64" exitCode=0 Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.658513 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" event={"ID":"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e","Type":"ContainerDied","Data":"f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64"} Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.658555 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" event={"ID":"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e","Type":"ContainerDied","Data":"00b89c3a5e723dafd9fa3730384a23a399c9c827a009c238c4da3f5f84010547"} Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.658584 4691 scope.go:117] "RemoveContainer" containerID="f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.658841 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-bqhqp" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.664181 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-svc\") pod \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.664245 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvtrb\" (UniqueName: \"kubernetes.io/projected/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-kube-api-access-wvtrb\") pod \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.664286 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-swift-storage-0\") pod \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.664338 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-sb\") pod \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.664375 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-nb\") pod 
\"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.664398 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-config\") pod \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\" (UID: \"bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e\") " Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.692404 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-kube-api-access-wvtrb" (OuterVolumeSpecName: "kube-api-access-wvtrb") pod "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" (UID: "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e"). InnerVolumeSpecName "kube-api-access-wvtrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.738537 4691 scope.go:117] "RemoveContainer" containerID="c72600fcf0d69b8e2c0ef7c27e906684d5f44203f7f2d47b70884a9489b83968" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.739852 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" (UID: "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.751177 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-config" (OuterVolumeSpecName: "config") pod "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" (UID: "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.769128 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvtrb\" (UniqueName: \"kubernetes.io/projected/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-kube-api-access-wvtrb\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.769170 4691 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.769183 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.775584 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" (UID: "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.786847 4691 scope.go:117] "RemoveContainer" containerID="f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64" Nov 24 08:12:58 crc kubenswrapper[4691]: E1124 08:12:58.787386 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64\": container with ID starting with f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64 not found: ID does not exist" containerID="f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.787667 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64"} err="failed to get container status \"f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64\": rpc error: code = NotFound desc = could not find container \"f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64\": container with ID starting with f26ee8d5b6435cce772fc4e2bc12fab25639e1fd14838f40c584bc3afd66be64 not found: ID does not exist" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.787716 4691 scope.go:117] "RemoveContainer" containerID="c72600fcf0d69b8e2c0ef7c27e906684d5f44203f7f2d47b70884a9489b83968" Nov 24 08:12:58 crc kubenswrapper[4691]: E1124 08:12:58.788082 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c72600fcf0d69b8e2c0ef7c27e906684d5f44203f7f2d47b70884a9489b83968\": container with ID starting with c72600fcf0d69b8e2c0ef7c27e906684d5f44203f7f2d47b70884a9489b83968 not found: ID does not exist" containerID="c72600fcf0d69b8e2c0ef7c27e906684d5f44203f7f2d47b70884a9489b83968" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.788114 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c72600fcf0d69b8e2c0ef7c27e906684d5f44203f7f2d47b70884a9489b83968"} err="failed to get container status \"c72600fcf0d69b8e2c0ef7c27e906684d5f44203f7f2d47b70884a9489b83968\": rpc error: code = NotFound desc = could not find container \"c72600fcf0d69b8e2c0ef7c27e906684d5f44203f7f2d47b70884a9489b83968\": container with ID starting with c72600fcf0d69b8e2c0ef7c27e906684d5f44203f7f2d47b70884a9489b83968 not found: ID does not exist" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.790557 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" (UID: "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.810478 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" (UID: "bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.872214 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.872262 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.872277 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 08:12:58 crc kubenswrapper[4691]: I1124 08:12:58.997746 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-bqhqp"] Nov 24 08:12:59 crc kubenswrapper[4691]: I1124 08:12:59.006573 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-bqhqp"] Nov 24 08:12:59 crc kubenswrapper[4691]: I1124 08:12:59.128959 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-2vnf5"] Nov 24 08:12:59 crc kubenswrapper[4691]: I1124 08:12:59.673313 4691 generic.go:334] "Generic (PLEG): container finished" podID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerID="b3b73dfb520a13d0f0e064d9ef416b63b9832f8558885f558f784b79ed0c69b6" exitCode=0 Nov 24 08:12:59 crc kubenswrapper[4691]: I1124 08:12:59.673414 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" event={"ID":"e4184c5f-6573-4950-8920-d0b3d7aa2989","Type":"ContainerDied","Data":"b3b73dfb520a13d0f0e064d9ef416b63b9832f8558885f558f784b79ed0c69b6"} Nov 24 08:12:59 crc kubenswrapper[4691]: I1124 08:12:59.673798 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" event={"ID":"e4184c5f-6573-4950-8920-d0b3d7aa2989","Type":"ContainerStarted","Data":"56e162076a445e9f662424f614fd90e381ded222bdfafc57c0e7dbc19140d8c6"} Nov 24 08:13:00 crc kubenswrapper[4691]: I1124 08:13:00.683913 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" event={"ID":"e4184c5f-6573-4950-8920-d0b3d7aa2989","Type":"ContainerStarted","Data":"2a2a2562c36e38706ab1f8efc94201056f0d5cc564e742fbabbe272807054562"} Nov 24 08:13:00 crc kubenswrapper[4691]: I1124 08:13:00.684333 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:13:00 crc kubenswrapper[4691]: I1124 08:13:00.711718 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" podStartSLOduration=2.711686571 podStartE2EDuration="2.711686571s" podCreationTimestamp="2025-11-24 08:12:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:00.703552515 +0000 UTC m=+942.702501804" watchObservedRunningTime="2025-11-24 08:13:00.711686571 +0000 UTC m=+942.710635820" Nov 24 08:13:00 crc kubenswrapper[4691]: I1124 08:13:00.770882 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" path="/var/lib/kubelet/pods/bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e/volumes" Nov 24 08:13:02 crc 
kubenswrapper[4691]: I1124 08:13:02.048655 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.391034 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-dsgr5"] Nov 24 08:13:02 crc kubenswrapper[4691]: E1124 08:13:02.391351 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" containerName="init" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.391367 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" containerName="init" Nov 24 08:13:02 crc kubenswrapper[4691]: E1124 08:13:02.391396 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" containerName="dnsmasq-dns" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.391402 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" containerName="dnsmasq-dns" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.391579 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb7fb56b-a6e2-4a3a-ab31-ef0a27eb8e5e" containerName="dnsmasq-dns" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.392174 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dsgr5" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.409728 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-dsgr5"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.412795 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.457290 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqn4v\" (UniqueName: \"kubernetes.io/projected/28b9ab5c-c3db-4418-a996-bf6da7141bba-kube-api-access-jqn4v\") pod \"cinder-db-create-dsgr5\" (UID: \"28b9ab5c-c3db-4418-a996-bf6da7141bba\") " pod="openstack/cinder-db-create-dsgr5" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.458614 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28b9ab5c-c3db-4418-a996-bf6da7141bba-operator-scripts\") pod \"cinder-db-create-dsgr5\" (UID: \"28b9ab5c-c3db-4418-a996-bf6da7141bba\") " pod="openstack/cinder-db-create-dsgr5" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.497627 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-cr6nx"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.503424 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-cr6nx" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.511745 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-cr6nx"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.543962 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-3e08-account-create-wn9m9"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.546329 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-3e08-account-create-wn9m9" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.552800 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.560651 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f079598d-3f79-401a-aada-3f81a4fc3555-operator-scripts\") pod \"barbican-3e08-account-create-wn9m9\" (UID: \"f079598d-3f79-401a-aada-3f81a4fc3555\") " pod="openstack/barbican-3e08-account-create-wn9m9" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.560745 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28b9ab5c-c3db-4418-a996-bf6da7141bba-operator-scripts\") pod \"cinder-db-create-dsgr5\" (UID: \"28b9ab5c-c3db-4418-a996-bf6da7141bba\") " pod="openstack/cinder-db-create-dsgr5" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.560779 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4e87f52-369f-4a4f-9d3b-7e430dbac208-operator-scripts\") pod \"barbican-db-create-cr6nx\" (UID: \"f4e87f52-369f-4a4f-9d3b-7e430dbac208\") " pod="openstack/barbican-db-create-cr6nx" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.560802 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r988v\" (UniqueName: \"kubernetes.io/projected/f079598d-3f79-401a-aada-3f81a4fc3555-kube-api-access-r988v\") pod \"barbican-3e08-account-create-wn9m9\" (UID: \"f079598d-3f79-401a-aada-3f81a4fc3555\") " pod="openstack/barbican-3e08-account-create-wn9m9" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.560843 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9w8js\" (UniqueName: \"kubernetes.io/projected/f4e87f52-369f-4a4f-9d3b-7e430dbac208-kube-api-access-9w8js\") pod \"barbican-db-create-cr6nx\" (UID: \"f4e87f52-369f-4a4f-9d3b-7e430dbac208\") " pod="openstack/barbican-db-create-cr6nx" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.560883 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqn4v\" (UniqueName: \"kubernetes.io/projected/28b9ab5c-c3db-4418-a996-bf6da7141bba-kube-api-access-jqn4v\") pod \"cinder-db-create-dsgr5\" (UID: \"28b9ab5c-c3db-4418-a996-bf6da7141bba\") " pod="openstack/cinder-db-create-dsgr5" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.562290 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28b9ab5c-c3db-4418-a996-bf6da7141bba-operator-scripts\") pod \"cinder-db-create-dsgr5\" (UID: \"28b9ab5c-c3db-4418-a996-bf6da7141bba\") " pod="openstack/cinder-db-create-dsgr5" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.575727 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-3e08-account-create-wn9m9"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.594411 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-ad5e-account-create-vkzqn"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.598464 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-ad5e-account-create-vkzqn" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.610835 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqn4v\" (UniqueName: \"kubernetes.io/projected/28b9ab5c-c3db-4418-a996-bf6da7141bba-kube-api-access-jqn4v\") pod \"cinder-db-create-dsgr5\" (UID: \"28b9ab5c-c3db-4418-a996-bf6da7141bba\") " pod="openstack/cinder-db-create-dsgr5" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.611007 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.628764 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-ad5e-account-create-vkzqn"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.662035 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4e87f52-369f-4a4f-9d3b-7e430dbac208-operator-scripts\") pod \"barbican-db-create-cr6nx\" (UID: \"f4e87f52-369f-4a4f-9d3b-7e430dbac208\") " pod="openstack/barbican-db-create-cr6nx" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.662097 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r988v\" (UniqueName: \"kubernetes.io/projected/f079598d-3f79-401a-aada-3f81a4fc3555-kube-api-access-r988v\") pod \"barbican-3e08-account-create-wn9m9\" (UID: \"f079598d-3f79-401a-aada-3f81a4fc3555\") " pod="openstack/barbican-3e08-account-create-wn9m9" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.662150 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9w8js\" (UniqueName: \"kubernetes.io/projected/f4e87f52-369f-4a4f-9d3b-7e430dbac208-kube-api-access-9w8js\") pod \"barbican-db-create-cr6nx\" (UID: \"f4e87f52-369f-4a4f-9d3b-7e430dbac208\") " pod="openstack/barbican-db-create-cr6nx" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.662182 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec830eac-ff50-4522-a9ec-1a6c9870859d-operator-scripts\") pod \"cinder-ad5e-account-create-vkzqn\" (UID: \"ec830eac-ff50-4522-a9ec-1a6c9870859d\") " pod="openstack/cinder-ad5e-account-create-vkzqn" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.662228 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64n2f\" (UniqueName: \"kubernetes.io/projected/ec830eac-ff50-4522-a9ec-1a6c9870859d-kube-api-access-64n2f\") pod \"cinder-ad5e-account-create-vkzqn\" (UID: \"ec830eac-ff50-4522-a9ec-1a6c9870859d\") " pod="openstack/cinder-ad5e-account-create-vkzqn" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.662270 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f079598d-3f79-401a-aada-3f81a4fc3555-operator-scripts\") pod \"barbican-3e08-account-create-wn9m9\" (UID: \"f079598d-3f79-401a-aada-3f81a4fc3555\") " pod="openstack/barbican-3e08-account-create-wn9m9" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.663330 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f079598d-3f79-401a-aada-3f81a4fc3555-operator-scripts\") pod \"barbican-3e08-account-create-wn9m9\" (UID: 
\"f079598d-3f79-401a-aada-3f81a4fc3555\") " pod="openstack/barbican-3e08-account-create-wn9m9" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.666729 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4e87f52-369f-4a4f-9d3b-7e430dbac208-operator-scripts\") pod \"barbican-db-create-cr6nx\" (UID: \"f4e87f52-369f-4a4f-9d3b-7e430dbac208\") " pod="openstack/barbican-db-create-cr6nx" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.683175 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r988v\" (UniqueName: \"kubernetes.io/projected/f079598d-3f79-401a-aada-3f81a4fc3555-kube-api-access-r988v\") pod \"barbican-3e08-account-create-wn9m9\" (UID: \"f079598d-3f79-401a-aada-3f81a4fc3555\") " pod="openstack/barbican-3e08-account-create-wn9m9" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.685497 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9w8js\" (UniqueName: \"kubernetes.io/projected/f4e87f52-369f-4a4f-9d3b-7e430dbac208-kube-api-access-9w8js\") pod \"barbican-db-create-cr6nx\" (UID: \"f4e87f52-369f-4a4f-9d3b-7e430dbac208\") " pod="openstack/barbican-db-create-cr6nx" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.709094 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dsgr5" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.841588 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec830eac-ff50-4522-a9ec-1a6c9870859d-operator-scripts\") pod \"cinder-ad5e-account-create-vkzqn\" (UID: \"ec830eac-ff50-4522-a9ec-1a6c9870859d\") " pod="openstack/cinder-ad5e-account-create-vkzqn" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.841809 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64n2f\" (UniqueName: \"kubernetes.io/projected/ec830eac-ff50-4522-a9ec-1a6c9870859d-kube-api-access-64n2f\") pod \"cinder-ad5e-account-create-vkzqn\" (UID: \"ec830eac-ff50-4522-a9ec-1a6c9870859d\") " pod="openstack/cinder-ad5e-account-create-vkzqn" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.843291 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec830eac-ff50-4522-a9ec-1a6c9870859d-operator-scripts\") pod \"cinder-ad5e-account-create-vkzqn\" (UID: \"ec830eac-ff50-4522-a9ec-1a6c9870859d\") " pod="openstack/cinder-ad5e-account-create-vkzqn" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.849734 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-cr6nx" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.895847 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64n2f\" (UniqueName: \"kubernetes.io/projected/ec830eac-ff50-4522-a9ec-1a6c9870859d-kube-api-access-64n2f\") pod \"cinder-ad5e-account-create-vkzqn\" (UID: \"ec830eac-ff50-4522-a9ec-1a6c9870859d\") " pod="openstack/cinder-ad5e-account-create-vkzqn" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.896734 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-3e08-account-create-wn9m9" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.913618 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-11b5-account-create-bbtld"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.915245 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-fpjgm"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.915417 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-11b5-account-create-bbtld" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.916885 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-11b5-account-create-bbtld"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.916999 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fpjgm" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.917144 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-l7k7s"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.918739 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.919584 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.921726 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rw8tv" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.921772 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.922116 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.924855 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-fpjgm"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.926275 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.938501 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-l7k7s"] Nov 24 08:13:02 crc kubenswrapper[4691]: I1124 08:13:02.976018 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-ad5e-account-create-vkzqn" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.047630 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-config-data\") pod \"keystone-db-sync-l7k7s\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.047678 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5wsl\" (UniqueName: \"kubernetes.io/projected/0eeca96b-473b-466d-92bd-a1c3fdd22dac-kube-api-access-r5wsl\") pod \"keystone-db-sync-l7k7s\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.047714 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zh8r\" (UniqueName: \"kubernetes.io/projected/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-kube-api-access-4zh8r\") pod \"neutron-db-create-fpjgm\" (UID: \"0e0893d9-ac7b-472f-8fc9-ab5dffccf750\") " pod="openstack/neutron-db-create-fpjgm" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.047741 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-combined-ca-bundle\") pod \"keystone-db-sync-l7k7s\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.047768 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85nnx\" (UniqueName: \"kubernetes.io/projected/2268415a-bc56-4f19-8d6e-57c09bc60145-kube-api-access-85nnx\") pod \"neutron-11b5-account-create-bbtld\" (UID: \"2268415a-bc56-4f19-8d6e-57c09bc60145\") " pod="openstack/neutron-11b5-account-create-bbtld" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.048176 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-operator-scripts\") pod \"neutron-db-create-fpjgm\" (UID: \"0e0893d9-ac7b-472f-8fc9-ab5dffccf750\") " pod="openstack/neutron-db-create-fpjgm" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.048253 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2268415a-bc56-4f19-8d6e-57c09bc60145-operator-scripts\") pod \"neutron-11b5-account-create-bbtld\" (UID: \"2268415a-bc56-4f19-8d6e-57c09bc60145\") " pod="openstack/neutron-11b5-account-create-bbtld" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.150205 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-operator-scripts\") pod \"neutron-db-create-fpjgm\" (UID: \"0e0893d9-ac7b-472f-8fc9-ab5dffccf750\") " pod="openstack/neutron-db-create-fpjgm" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.150576 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/2268415a-bc56-4f19-8d6e-57c09bc60145-operator-scripts\") pod \"neutron-11b5-account-create-bbtld\" (UID: \"2268415a-bc56-4f19-8d6e-57c09bc60145\") " pod="openstack/neutron-11b5-account-create-bbtld" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.150621 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-config-data\") pod \"keystone-db-sync-l7k7s\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.150649 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5wsl\" (UniqueName: \"kubernetes.io/projected/0eeca96b-473b-466d-92bd-a1c3fdd22dac-kube-api-access-r5wsl\") pod \"keystone-db-sync-l7k7s\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.150691 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zh8r\" (UniqueName: \"kubernetes.io/projected/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-kube-api-access-4zh8r\") pod \"neutron-db-create-fpjgm\" (UID: \"0e0893d9-ac7b-472f-8fc9-ab5dffccf750\") " pod="openstack/neutron-db-create-fpjgm" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.150725 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-combined-ca-bundle\") pod \"keystone-db-sync-l7k7s\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.150753 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85nnx\" (UniqueName: \"kubernetes.io/projected/2268415a-bc56-4f19-8d6e-57c09bc60145-kube-api-access-85nnx\") pod \"neutron-11b5-account-create-bbtld\" (UID: \"2268415a-bc56-4f19-8d6e-57c09bc60145\") " pod="openstack/neutron-11b5-account-create-bbtld" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.152186 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-operator-scripts\") pod \"neutron-db-create-fpjgm\" (UID: \"0e0893d9-ac7b-472f-8fc9-ab5dffccf750\") " pod="openstack/neutron-db-create-fpjgm" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.152864 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2268415a-bc56-4f19-8d6e-57c09bc60145-operator-scripts\") pod \"neutron-11b5-account-create-bbtld\" (UID: \"2268415a-bc56-4f19-8d6e-57c09bc60145\") " pod="openstack/neutron-11b5-account-create-bbtld" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.163267 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-combined-ca-bundle\") pod \"keystone-db-sync-l7k7s\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.163382 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-config-data\") pod \"keystone-db-sync-l7k7s\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.175439 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85nnx\" (UniqueName: \"kubernetes.io/projected/2268415a-bc56-4f19-8d6e-57c09bc60145-kube-api-access-85nnx\") pod \"neutron-11b5-account-create-bbtld\" (UID: \"2268415a-bc56-4f19-8d6e-57c09bc60145\") " pod="openstack/neutron-11b5-account-create-bbtld" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.180030 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zh8r\" (UniqueName: \"kubernetes.io/projected/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-kube-api-access-4zh8r\") pod \"neutron-db-create-fpjgm\" (UID: \"0e0893d9-ac7b-472f-8fc9-ab5dffccf750\") " pod="openstack/neutron-db-create-fpjgm" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.193064 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5wsl\" (UniqueName: \"kubernetes.io/projected/0eeca96b-473b-466d-92bd-a1c3fdd22dac-kube-api-access-r5wsl\") pod \"keystone-db-sync-l7k7s\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.302990 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-dsgr5"] Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.305208 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-11b5-account-create-bbtld" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.324626 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fpjgm" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.326095 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.544630 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-cr6nx"] Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.584722 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-ad5e-account-create-vkzqn"] Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.715919 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dsgr5" event={"ID":"28b9ab5c-c3db-4418-a996-bf6da7141bba","Type":"ContainerStarted","Data":"417161d17090e868f1ee6b16e362945763298a2a58ba4f0edf696cae1c28bff8"} Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.715982 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dsgr5" event={"ID":"28b9ab5c-c3db-4418-a996-bf6da7141bba","Type":"ContainerStarted","Data":"57cc5d143bba3fa02e356196ffd428452cec22fbb896b7ee3373f4968b7c6366"} Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.728620 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-cr6nx" event={"ID":"f4e87f52-369f-4a4f-9d3b-7e430dbac208","Type":"ContainerStarted","Data":"ad281247f7b2b3300598c420b7553c43313490915a85e2f9074406adc4f98277"} Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.745354 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-ad5e-account-create-vkzqn" event={"ID":"ec830eac-ff50-4522-a9ec-1a6c9870859d","Type":"ContainerStarted","Data":"1c1fe9ec655abf24f97c38a1f0af1f07dd5186ac8cac088dc078211881b8e6c4"} Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.750347 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-3e08-account-create-wn9m9"] Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.751016 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-dsgr5" podStartSLOduration=1.7509614980000001 podStartE2EDuration="1.750961498s" podCreationTimestamp="2025-11-24 08:13:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:03.741290598 +0000 UTC m=+945.740239847" watchObservedRunningTime="2025-11-24 08:13:03.750961498 +0000 UTC m=+945.749910737" Nov 24 08:13:03 crc kubenswrapper[4691]: W1124 08:13:03.753041 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf079598d_3f79_401a_aada_3f81a4fc3555.slice/crio-45ef4386cc3570470039649f130395dd09dade7775475295bec593352d265159 WatchSource:0}: Error finding container 45ef4386cc3570470039649f130395dd09dade7775475295bec593352d265159: Status 404 returned error can't find the container with id 45ef4386cc3570470039649f130395dd09dade7775475295bec593352d265159 Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.877047 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-l7k7s"] Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.980138 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-11b5-account-create-bbtld"] Nov 24 08:13:03 crc kubenswrapper[4691]: I1124 08:13:03.989339 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-fpjgm"] Nov 24 08:13:04 crc kubenswrapper[4691]: W1124 08:13:04.018946 4691 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2268415a_bc56_4f19_8d6e_57c09bc60145.slice/crio-3a1bc9aae37b573ea49251a982ae80a1ab004bf911efe45e51654e56449978ff WatchSource:0}: Error finding container 3a1bc9aae37b573ea49251a982ae80a1ab004bf911efe45e51654e56449978ff: Status 404 returned error can't find the container with id 3a1bc9aae37b573ea49251a982ae80a1ab004bf911efe45e51654e56449978ff Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.759982 4691 generic.go:334] "Generic (PLEG): container finished" podID="28b9ab5c-c3db-4418-a996-bf6da7141bba" containerID="417161d17090e868f1ee6b16e362945763298a2a58ba4f0edf696cae1c28bff8" exitCode=0 Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.766154 4691 generic.go:334] "Generic (PLEG): container finished" podID="f4e87f52-369f-4a4f-9d3b-7e430dbac208" containerID="28dd174184bd6b03d20c693f712f859e498b4627a0ca65c49b043f761e796e17" exitCode=0 Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.768921 4691 generic.go:334] "Generic (PLEG): container finished" podID="ec830eac-ff50-4522-a9ec-1a6c9870859d" containerID="d2043d5361ff87e2a2d9bfd8e05d81cdf277a395feb5d8066f0cef445fcd6d75" exitCode=0 Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.780785 4691 generic.go:334] "Generic (PLEG): container finished" podID="2268415a-bc56-4f19-8d6e-57c09bc60145" containerID="52b240dddc8b3e0e59777ef1e957167213657af7e54590b02aa020cc75dedf91" exitCode=0 Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.781644 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dsgr5" event={"ID":"28b9ab5c-c3db-4418-a996-bf6da7141bba","Type":"ContainerDied","Data":"417161d17090e868f1ee6b16e362945763298a2a58ba4f0edf696cae1c28bff8"} Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.781833 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-cr6nx" event={"ID":"f4e87f52-369f-4a4f-9d3b-7e430dbac208","Type":"ContainerDied","Data":"28dd174184bd6b03d20c693f712f859e498b4627a0ca65c49b043f761e796e17"} Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.781995 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-ad5e-account-create-vkzqn" event={"ID":"ec830eac-ff50-4522-a9ec-1a6c9870859d","Type":"ContainerDied","Data":"d2043d5361ff87e2a2d9bfd8e05d81cdf277a395feb5d8066f0cef445fcd6d75"} Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.782138 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-l7k7s" event={"ID":"0eeca96b-473b-466d-92bd-a1c3fdd22dac","Type":"ContainerStarted","Data":"5172bb7fdea3fe8e550e7818f80190aa03c4099aa87b44811f8b590e3b74dc19"} Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.782282 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-11b5-account-create-bbtld" event={"ID":"2268415a-bc56-4f19-8d6e-57c09bc60145","Type":"ContainerDied","Data":"52b240dddc8b3e0e59777ef1e957167213657af7e54590b02aa020cc75dedf91"} Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.782410 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-11b5-account-create-bbtld" event={"ID":"2268415a-bc56-4f19-8d6e-57c09bc60145","Type":"ContainerStarted","Data":"3a1bc9aae37b573ea49251a982ae80a1ab004bf911efe45e51654e56449978ff"} Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.791721 4691 generic.go:334] "Generic (PLEG): container finished" podID="0e0893d9-ac7b-472f-8fc9-ab5dffccf750" 
containerID="023df6d958e8e53c7f7fe9b6d3f94c2f1a2130fae6d79ff982ea23ab4b4b6323" exitCode=0 Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.791902 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fpjgm" event={"ID":"0e0893d9-ac7b-472f-8fc9-ab5dffccf750","Type":"ContainerDied","Data":"023df6d958e8e53c7f7fe9b6d3f94c2f1a2130fae6d79ff982ea23ab4b4b6323"} Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.791926 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fpjgm" event={"ID":"0e0893d9-ac7b-472f-8fc9-ab5dffccf750","Type":"ContainerStarted","Data":"86272281cf99c576fc49e62a3fa0ba01368b97cd306f5d51bb9a5be2afb257bf"} Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.794871 4691 generic.go:334] "Generic (PLEG): container finished" podID="f079598d-3f79-401a-aada-3f81a4fc3555" containerID="92a37b08e7d4f94854eeeea4cfa07de7639a21088c70615b2e62d2626c6e2f04" exitCode=0 Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.794916 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-3e08-account-create-wn9m9" event={"ID":"f079598d-3f79-401a-aada-3f81a4fc3555","Type":"ContainerDied","Data":"92a37b08e7d4f94854eeeea4cfa07de7639a21088c70615b2e62d2626c6e2f04"} Nov 24 08:13:04 crc kubenswrapper[4691]: I1124 08:13:04.794941 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-3e08-account-create-wn9m9" event={"ID":"f079598d-3f79-401a-aada-3f81a4fc3555","Type":"ContainerStarted","Data":"45ef4386cc3570470039649f130395dd09dade7775475295bec593352d265159"} Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.604491 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.691854 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-dq5hb"] Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.692753 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb" podUID="fac2b9b8-8c64-4322-9251-b22dc0e758ed" containerName="dnsmasq-dns" containerID="cri-o://e6e2281672da1062336f30149b437c6019bb20363bbbb5195f4c30506d57d6f6" gracePeriod=10 Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.840361 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-11b5-account-create-bbtld" event={"ID":"2268415a-bc56-4f19-8d6e-57c09bc60145","Type":"ContainerDied","Data":"3a1bc9aae37b573ea49251a982ae80a1ab004bf911efe45e51654e56449978ff"} Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.840642 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a1bc9aae37b573ea49251a982ae80a1ab004bf911efe45e51654e56449978ff" Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.843215 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fpjgm" event={"ID":"0e0893d9-ac7b-472f-8fc9-ab5dffccf750","Type":"ContainerDied","Data":"86272281cf99c576fc49e62a3fa0ba01368b97cd306f5d51bb9a5be2afb257bf"} Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.843244 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86272281cf99c576fc49e62a3fa0ba01368b97cd306f5d51bb9a5be2afb257bf" Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.845190 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-3e08-account-create-wn9m9" 
event={"ID":"f079598d-3f79-401a-aada-3f81a4fc3555","Type":"ContainerDied","Data":"45ef4386cc3570470039649f130395dd09dade7775475295bec593352d265159"} Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.845214 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45ef4386cc3570470039649f130395dd09dade7775475295bec593352d265159" Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.846475 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dsgr5" event={"ID":"28b9ab5c-c3db-4418-a996-bf6da7141bba","Type":"ContainerDied","Data":"57cc5d143bba3fa02e356196ffd428452cec22fbb896b7ee3373f4968b7c6366"} Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.846496 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57cc5d143bba3fa02e356196ffd428452cec22fbb896b7ee3373f4968b7c6366" Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.847654 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-cr6nx" event={"ID":"f4e87f52-369f-4a4f-9d3b-7e430dbac208","Type":"ContainerDied","Data":"ad281247f7b2b3300598c420b7553c43313490915a85e2f9074406adc4f98277"} Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.847675 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad281247f7b2b3300598c420b7553c43313490915a85e2f9074406adc4f98277" Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.855281 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-ad5e-account-create-vkzqn" event={"ID":"ec830eac-ff50-4522-a9ec-1a6c9870859d","Type":"ContainerDied","Data":"1c1fe9ec655abf24f97c38a1f0af1f07dd5186ac8cac088dc078211881b8e6c4"} Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.855319 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c1fe9ec655abf24f97c38a1f0af1f07dd5186ac8cac088dc078211881b8e6c4" Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.859171 4691 generic.go:334] "Generic (PLEG): container finished" podID="fac2b9b8-8c64-4322-9251-b22dc0e758ed" containerID="e6e2281672da1062336f30149b437c6019bb20363bbbb5195f4c30506d57d6f6" exitCode=0 Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.859205 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb" event={"ID":"fac2b9b8-8c64-4322-9251-b22dc0e758ed","Type":"ContainerDied","Data":"e6e2281672da1062336f30149b437c6019bb20363bbbb5195f4c30506d57d6f6"} Nov 24 08:13:08 crc kubenswrapper[4691]: I1124 08:13:08.984139 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-ad5e-account-create-vkzqn" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.019605 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dsgr5" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.052901 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fpjgm" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.068197 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-11b5-account-create-bbtld" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.083756 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-3e08-account-create-wn9m9" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.096629 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-cr6nx" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.104773 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64n2f\" (UniqueName: \"kubernetes.io/projected/ec830eac-ff50-4522-a9ec-1a6c9870859d-kube-api-access-64n2f\") pod \"ec830eac-ff50-4522-a9ec-1a6c9870859d\" (UID: \"ec830eac-ff50-4522-a9ec-1a6c9870859d\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.104969 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqn4v\" (UniqueName: \"kubernetes.io/projected/28b9ab5c-c3db-4418-a996-bf6da7141bba-kube-api-access-jqn4v\") pod \"28b9ab5c-c3db-4418-a996-bf6da7141bba\" (UID: \"28b9ab5c-c3db-4418-a996-bf6da7141bba\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.105127 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec830eac-ff50-4522-a9ec-1a6c9870859d-operator-scripts\") pod \"ec830eac-ff50-4522-a9ec-1a6c9870859d\" (UID: \"ec830eac-ff50-4522-a9ec-1a6c9870859d\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.105273 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28b9ab5c-c3db-4418-a996-bf6da7141bba-operator-scripts\") pod \"28b9ab5c-c3db-4418-a996-bf6da7141bba\" (UID: \"28b9ab5c-c3db-4418-a996-bf6da7141bba\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.106165 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28b9ab5c-c3db-4418-a996-bf6da7141bba-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "28b9ab5c-c3db-4418-a996-bf6da7141bba" (UID: "28b9ab5c-c3db-4418-a996-bf6da7141bba"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.107599 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec830eac-ff50-4522-a9ec-1a6c9870859d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ec830eac-ff50-4522-a9ec-1a6c9870859d" (UID: "ec830eac-ff50-4522-a9ec-1a6c9870859d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.117342 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec830eac-ff50-4522-a9ec-1a6c9870859d-kube-api-access-64n2f" (OuterVolumeSpecName: "kube-api-access-64n2f") pod "ec830eac-ff50-4522-a9ec-1a6c9870859d" (UID: "ec830eac-ff50-4522-a9ec-1a6c9870859d"). InnerVolumeSpecName "kube-api-access-64n2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.133206 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28b9ab5c-c3db-4418-a996-bf6da7141bba-kube-api-access-jqn4v" (OuterVolumeSpecName: "kube-api-access-jqn4v") pod "28b9ab5c-c3db-4418-a996-bf6da7141bba" (UID: "28b9ab5c-c3db-4418-a996-bf6da7141bba"). InnerVolumeSpecName "kube-api-access-jqn4v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.170788 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.206352 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4e87f52-369f-4a4f-9d3b-7e430dbac208-operator-scripts\") pod \"f4e87f52-369f-4a4f-9d3b-7e430dbac208\" (UID: \"f4e87f52-369f-4a4f-9d3b-7e430dbac208\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.206394 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f079598d-3f79-401a-aada-3f81a4fc3555-operator-scripts\") pod \"f079598d-3f79-401a-aada-3f81a4fc3555\" (UID: \"f079598d-3f79-401a-aada-3f81a4fc3555\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.206410 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2268415a-bc56-4f19-8d6e-57c09bc60145-operator-scripts\") pod \"2268415a-bc56-4f19-8d6e-57c09bc60145\" (UID: \"2268415a-bc56-4f19-8d6e-57c09bc60145\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.206496 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-operator-scripts\") pod \"0e0893d9-ac7b-472f-8fc9-ab5dffccf750\" (UID: \"0e0893d9-ac7b-472f-8fc9-ab5dffccf750\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.206547 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zh8r\" (UniqueName: \"kubernetes.io/projected/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-kube-api-access-4zh8r\") pod \"0e0893d9-ac7b-472f-8fc9-ab5dffccf750\" (UID: \"0e0893d9-ac7b-472f-8fc9-ab5dffccf750\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.206571 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r988v\" (UniqueName: \"kubernetes.io/projected/f079598d-3f79-401a-aada-3f81a4fc3555-kube-api-access-r988v\") pod \"f079598d-3f79-401a-aada-3f81a4fc3555\" (UID: \"f079598d-3f79-401a-aada-3f81a4fc3555\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.206594 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85nnx\" (UniqueName: \"kubernetes.io/projected/2268415a-bc56-4f19-8d6e-57c09bc60145-kube-api-access-85nnx\") pod \"2268415a-bc56-4f19-8d6e-57c09bc60145\" (UID: \"2268415a-bc56-4f19-8d6e-57c09bc60145\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.206812 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9w8js\" (UniqueName: \"kubernetes.io/projected/f4e87f52-369f-4a4f-9d3b-7e430dbac208-kube-api-access-9w8js\") pod \"f4e87f52-369f-4a4f-9d3b-7e430dbac208\" (UID: \"f4e87f52-369f-4a4f-9d3b-7e430dbac208\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.207297 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28b9ab5c-c3db-4418-a996-bf6da7141bba-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.207317 4691 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-64n2f\" (UniqueName: \"kubernetes.io/projected/ec830eac-ff50-4522-a9ec-1a6c9870859d-kube-api-access-64n2f\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.207332 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqn4v\" (UniqueName: \"kubernetes.io/projected/28b9ab5c-c3db-4418-a996-bf6da7141bba-kube-api-access-jqn4v\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.207344 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec830eac-ff50-4522-a9ec-1a6c9870859d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.208286 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f079598d-3f79-401a-aada-3f81a4fc3555-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f079598d-3f79-401a-aada-3f81a4fc3555" (UID: "f079598d-3f79-401a-aada-3f81a4fc3555"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.208408 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4e87f52-369f-4a4f-9d3b-7e430dbac208-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f4e87f52-369f-4a4f-9d3b-7e430dbac208" (UID: "f4e87f52-369f-4a4f-9d3b-7e430dbac208"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.209352 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0e0893d9-ac7b-472f-8fc9-ab5dffccf750" (UID: "0e0893d9-ac7b-472f-8fc9-ab5dffccf750"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.214071 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2268415a-bc56-4f19-8d6e-57c09bc60145-kube-api-access-85nnx" (OuterVolumeSpecName: "kube-api-access-85nnx") pod "2268415a-bc56-4f19-8d6e-57c09bc60145" (UID: "2268415a-bc56-4f19-8d6e-57c09bc60145"). InnerVolumeSpecName "kube-api-access-85nnx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.215953 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f079598d-3f79-401a-aada-3f81a4fc3555-kube-api-access-r988v" (OuterVolumeSpecName: "kube-api-access-r988v") pod "f079598d-3f79-401a-aada-3f81a4fc3555" (UID: "f079598d-3f79-401a-aada-3f81a4fc3555"). InnerVolumeSpecName "kube-api-access-r988v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.218520 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2268415a-bc56-4f19-8d6e-57c09bc60145-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2268415a-bc56-4f19-8d6e-57c09bc60145" (UID: "2268415a-bc56-4f19-8d6e-57c09bc60145"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.222502 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-kube-api-access-4zh8r" (OuterVolumeSpecName: "kube-api-access-4zh8r") pod "0e0893d9-ac7b-472f-8fc9-ab5dffccf750" (UID: "0e0893d9-ac7b-472f-8fc9-ab5dffccf750"). InnerVolumeSpecName "kube-api-access-4zh8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.223510 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4e87f52-369f-4a4f-9d3b-7e430dbac208-kube-api-access-9w8js" (OuterVolumeSpecName: "kube-api-access-9w8js") pod "f4e87f52-369f-4a4f-9d3b-7e430dbac208" (UID: "f4e87f52-369f-4a4f-9d3b-7e430dbac208"). InnerVolumeSpecName "kube-api-access-9w8js". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.308642 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-nb\") pod \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.308794 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-sb\") pod \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.308916 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lksst\" (UniqueName: \"kubernetes.io/projected/fac2b9b8-8c64-4322-9251-b22dc0e758ed-kube-api-access-lksst\") pod \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.309063 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-config\") pod \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.309215 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-dns-svc\") pod \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\" (UID: \"fac2b9b8-8c64-4322-9251-b22dc0e758ed\") " Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.309949 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9w8js\" (UniqueName: \"kubernetes.io/projected/f4e87f52-369f-4a4f-9d3b-7e430dbac208-kube-api-access-9w8js\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.310009 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4e87f52-369f-4a4f-9d3b-7e430dbac208-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.310022 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f079598d-3f79-401a-aada-3f81a4fc3555-operator-scripts\") on node \"crc\" 
DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.310085 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2268415a-bc56-4f19-8d6e-57c09bc60145-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.310100 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.310113 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zh8r\" (UniqueName: \"kubernetes.io/projected/0e0893d9-ac7b-472f-8fc9-ab5dffccf750-kube-api-access-4zh8r\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.310126 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r988v\" (UniqueName: \"kubernetes.io/projected/f079598d-3f79-401a-aada-3f81a4fc3555-kube-api-access-r988v\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.310174 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85nnx\" (UniqueName: \"kubernetes.io/projected/2268415a-bc56-4f19-8d6e-57c09bc60145-kube-api-access-85nnx\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.316734 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fac2b9b8-8c64-4322-9251-b22dc0e758ed-kube-api-access-lksst" (OuterVolumeSpecName: "kube-api-access-lksst") pod "fac2b9b8-8c64-4322-9251-b22dc0e758ed" (UID: "fac2b9b8-8c64-4322-9251-b22dc0e758ed"). InnerVolumeSpecName "kube-api-access-lksst". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.351338 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-config" (OuterVolumeSpecName: "config") pod "fac2b9b8-8c64-4322-9251-b22dc0e758ed" (UID: "fac2b9b8-8c64-4322-9251-b22dc0e758ed"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.353960 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fac2b9b8-8c64-4322-9251-b22dc0e758ed" (UID: "fac2b9b8-8c64-4322-9251-b22dc0e758ed"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.358856 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fac2b9b8-8c64-4322-9251-b22dc0e758ed" (UID: "fac2b9b8-8c64-4322-9251-b22dc0e758ed"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.368358 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fac2b9b8-8c64-4322-9251-b22dc0e758ed" (UID: "fac2b9b8-8c64-4322-9251-b22dc0e758ed"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.412029 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.412098 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.412115 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.412136 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fac2b9b8-8c64-4322-9251-b22dc0e758ed-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.412155 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lksst\" (UniqueName: \"kubernetes.io/projected/fac2b9b8-8c64-4322-9251-b22dc0e758ed-kube-api-access-lksst\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.871793 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb" event={"ID":"fac2b9b8-8c64-4322-9251-b22dc0e758ed","Type":"ContainerDied","Data":"09062fa9b6b5733542dda4752973e9c1b8757ffc0d9261af3c2c111f07d132c5"} Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.871863 4691 scope.go:117] "RemoveContainer" containerID="e6e2281672da1062336f30149b437c6019bb20363bbbb5195f4c30506d57d6f6" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.871914 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-dq5hb" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.876184 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-cr6nx" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.877104 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-l7k7s" event={"ID":"0eeca96b-473b-466d-92bd-a1c3fdd22dac","Type":"ContainerStarted","Data":"5b50b74e2accae29b82dd33ed0b29f4886af782fefb71bf61c0d8437fe5dec03"} Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.877181 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fpjgm" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.880699 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-11b5-account-create-bbtld" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.876183 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-ad5e-account-create-vkzqn" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.881021 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dsgr5" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.881087 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-3e08-account-create-wn9m9" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.916248 4691 scope.go:117] "RemoveContainer" containerID="befa03cfd844199169b25bd1c69e1cabf04527abfb4870f653e219f8d39f0ff3" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.919638 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-l7k7s" podStartSLOduration=3.066281961 podStartE2EDuration="7.919616767s" podCreationTimestamp="2025-11-24 08:13:02 +0000 UTC" firstStartedPulling="2025-11-24 08:13:03.896499582 +0000 UTC m=+945.895448831" lastFinishedPulling="2025-11-24 08:13:08.749834388 +0000 UTC m=+950.748783637" observedRunningTime="2025-11-24 08:13:09.908425003 +0000 UTC m=+951.907374262" watchObservedRunningTime="2025-11-24 08:13:09.919616767 +0000 UTC m=+951.918566026" Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.978531 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-dq5hb"] Nov 24 08:13:09 crc kubenswrapper[4691]: I1124 08:13:09.978593 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-dq5hb"] Nov 24 08:13:10 crc kubenswrapper[4691]: I1124 08:13:10.770739 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fac2b9b8-8c64-4322-9251-b22dc0e758ed" path="/var/lib/kubelet/pods/fac2b9b8-8c64-4322-9251-b22dc0e758ed/volumes" Nov 24 08:13:11 crc kubenswrapper[4691]: I1124 08:13:11.903787 4691 generic.go:334] "Generic (PLEG): container finished" podID="0eeca96b-473b-466d-92bd-a1c3fdd22dac" containerID="5b50b74e2accae29b82dd33ed0b29f4886af782fefb71bf61c0d8437fe5dec03" exitCode=0 Nov 24 08:13:11 crc kubenswrapper[4691]: I1124 08:13:11.903847 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-l7k7s" event={"ID":"0eeca96b-473b-466d-92bd-a1c3fdd22dac","Type":"ContainerDied","Data":"5b50b74e2accae29b82dd33ed0b29f4886af782fefb71bf61c0d8437fe5dec03"} Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.247235 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.388621 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-combined-ca-bundle\") pod \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.388719 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-config-data\") pod \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.388777 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5wsl\" (UniqueName: \"kubernetes.io/projected/0eeca96b-473b-466d-92bd-a1c3fdd22dac-kube-api-access-r5wsl\") pod \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\" (UID: \"0eeca96b-473b-466d-92bd-a1c3fdd22dac\") " Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.405548 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0eeca96b-473b-466d-92bd-a1c3fdd22dac-kube-api-access-r5wsl" (OuterVolumeSpecName: "kube-api-access-r5wsl") pod "0eeca96b-473b-466d-92bd-a1c3fdd22dac" (UID: "0eeca96b-473b-466d-92bd-a1c3fdd22dac"). InnerVolumeSpecName "kube-api-access-r5wsl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.424027 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0eeca96b-473b-466d-92bd-a1c3fdd22dac" (UID: "0eeca96b-473b-466d-92bd-a1c3fdd22dac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.441377 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-config-data" (OuterVolumeSpecName: "config-data") pod "0eeca96b-473b-466d-92bd-a1c3fdd22dac" (UID: "0eeca96b-473b-466d-92bd-a1c3fdd22dac"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.491287 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.491326 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5wsl\" (UniqueName: \"kubernetes.io/projected/0eeca96b-473b-466d-92bd-a1c3fdd22dac-kube-api-access-r5wsl\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.491341 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eeca96b-473b-466d-92bd-a1c3fdd22dac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.927941 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-l7k7s" event={"ID":"0eeca96b-473b-466d-92bd-a1c3fdd22dac","Type":"ContainerDied","Data":"5172bb7fdea3fe8e550e7818f80190aa03c4099aa87b44811f8b590e3b74dc19"} Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.928001 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5172bb7fdea3fe8e550e7818f80190aa03c4099aa87b44811f8b590e3b74dc19" Nov 24 08:13:13 crc kubenswrapper[4691]: I1124 08:13:13.928783 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-l7k7s" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.191619 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-vqnvn"] Nov 24 08:13:14 crc kubenswrapper[4691]: E1124 08:13:14.197943 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eeca96b-473b-466d-92bd-a1c3fdd22dac" containerName="keystone-db-sync" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.197978 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eeca96b-473b-466d-92bd-a1c3fdd22dac" containerName="keystone-db-sync" Nov 24 08:13:14 crc kubenswrapper[4691]: E1124 08:13:14.197993 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b9ab5c-c3db-4418-a996-bf6da7141bba" containerName="mariadb-database-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.197998 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b9ab5c-c3db-4418-a996-bf6da7141bba" containerName="mariadb-database-create" Nov 24 08:13:14 crc kubenswrapper[4691]: E1124 08:13:14.198017 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e87f52-369f-4a4f-9d3b-7e430dbac208" containerName="mariadb-database-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198022 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e87f52-369f-4a4f-9d3b-7e430dbac208" containerName="mariadb-database-create" Nov 24 08:13:14 crc kubenswrapper[4691]: E1124 08:13:14.198036 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec830eac-ff50-4522-a9ec-1a6c9870859d" containerName="mariadb-account-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198041 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec830eac-ff50-4522-a9ec-1a6c9870859d" containerName="mariadb-account-create" Nov 24 08:13:14 crc kubenswrapper[4691]: E1124 08:13:14.198057 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f079598d-3f79-401a-aada-3f81a4fc3555" 
containerName="mariadb-account-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198063 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f079598d-3f79-401a-aada-3f81a4fc3555" containerName="mariadb-account-create" Nov 24 08:13:14 crc kubenswrapper[4691]: E1124 08:13:14.198080 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e0893d9-ac7b-472f-8fc9-ab5dffccf750" containerName="mariadb-database-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198086 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e0893d9-ac7b-472f-8fc9-ab5dffccf750" containerName="mariadb-database-create" Nov 24 08:13:14 crc kubenswrapper[4691]: E1124 08:13:14.198094 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fac2b9b8-8c64-4322-9251-b22dc0e758ed" containerName="init" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198100 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="fac2b9b8-8c64-4322-9251-b22dc0e758ed" containerName="init" Nov 24 08:13:14 crc kubenswrapper[4691]: E1124 08:13:14.198117 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fac2b9b8-8c64-4322-9251-b22dc0e758ed" containerName="dnsmasq-dns" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198123 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="fac2b9b8-8c64-4322-9251-b22dc0e758ed" containerName="dnsmasq-dns" Nov 24 08:13:14 crc kubenswrapper[4691]: E1124 08:13:14.198133 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2268415a-bc56-4f19-8d6e-57c09bc60145" containerName="mariadb-account-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198138 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="2268415a-bc56-4f19-8d6e-57c09bc60145" containerName="mariadb-account-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198280 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e0893d9-ac7b-472f-8fc9-ab5dffccf750" containerName="mariadb-database-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198294 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eeca96b-473b-466d-92bd-a1c3fdd22dac" containerName="keystone-db-sync" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198303 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="2268415a-bc56-4f19-8d6e-57c09bc60145" containerName="mariadb-account-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198314 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="fac2b9b8-8c64-4322-9251-b22dc0e758ed" containerName="dnsmasq-dns" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198325 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="f079598d-3f79-401a-aada-3f81a4fc3555" containerName="mariadb-account-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198337 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec830eac-ff50-4522-a9ec-1a6c9870859d" containerName="mariadb-account-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198344 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4e87f52-369f-4a4f-9d3b-7e430dbac208" containerName="mariadb-database-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.198356 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="28b9ab5c-c3db-4418-a996-bf6da7141bba" containerName="mariadb-database-create" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.199208 
4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.203604 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-vqnvn"] Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.278132 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-jtk2j"] Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.279177 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.284047 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rw8tv" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.284349 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.285161 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.286521 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jtk2j"] Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.291106 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.291882 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.308729 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.308787 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k2tk\" (UniqueName: \"kubernetes.io/projected/8beacf4c-49bf-4497-9a54-0ba1239969b3-kube-api-access-6k2tk\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.308823 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.308852 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.308886 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: 
\"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.308939 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-config\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.412791 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-fernet-keys\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.412913 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-config\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.413055 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-scripts\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.413242 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62bgf\" (UniqueName: \"kubernetes.io/projected/93da1283-17cd-4b15-a4dd-db78d80c187e-kube-api-access-62bgf\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.413312 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-credential-keys\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.413605 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-combined-ca-bundle\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.413704 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.413762 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k2tk\" (UniqueName: \"kubernetes.io/projected/8beacf4c-49bf-4497-9a54-0ba1239969b3-kube-api-access-6k2tk\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: 
\"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.413836 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.413878 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.413958 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-config-data\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.414028 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.414649 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-config\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.415541 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-sb\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.416120 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-nb\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.416353 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-dns-svc\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.423227 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-dns-swift-storage-0\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: 
I1124 08:13:14.435864 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-797fd4697-lsphq"] Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.445851 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-797fd4697-lsphq" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.460735 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.461027 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-gzz52" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.462123 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.462542 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.469201 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-797fd4697-lsphq"] Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.477660 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k2tk\" (UniqueName: \"kubernetes.io/projected/8beacf4c-49bf-4497-9a54-0ba1239969b3-kube-api-access-6k2tk\") pod \"dnsmasq-dns-bbf5cc879-vqnvn\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.517846 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-config-data\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.517948 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-fernet-keys\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.518024 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-scripts\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.518046 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62bgf\" (UniqueName: \"kubernetes.io/projected/93da1283-17cd-4b15-a4dd-db78d80c187e-kube-api-access-62bgf\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.518065 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-credential-keys\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.518084 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-combined-ca-bundle\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.529695 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-combined-ca-bundle\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.534247 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-credential-keys\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.536857 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-config-data\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.555688 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.557138 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-fernet-keys\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.557214 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-vsrzv"] Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.557711 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-scripts\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.558513 4691 util.go:30] "No sandbox for pod can be found. 
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.566925 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.567200 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.567404 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8q2qr"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.579143 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62bgf\" (UniqueName: \"kubernetes.io/projected/93da1283-17cd-4b15-a4dd-db78d80c187e-kube-api-access-62bgf\") pod \"keystone-bootstrap-jtk2j\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " pod="openstack/keystone-bootstrap-jtk2j"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.594085 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-4jnwp"]
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.595612 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4jnwp"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.607075 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.607648 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.607770 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-k562q"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.617489 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-4jnwp"]
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.630997 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-vsrzv"]
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.631566 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jtk2j"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.632093 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-logs\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.632161 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ss7j\" (UniqueName: \"kubernetes.io/projected/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-kube-api-access-8ss7j\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.632227 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-scripts\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.632265 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-config-data\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.632302 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-horizon-secret-key\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.658766 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-qvhpm"]
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.660091 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-qvhpm"
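[Annotation] The "SyncLoop ADD/UPDATE/DELETE" entries with source="api" record pod events the kubelet receives from the API server. A minimal client-go sketch (assumptions: in-cluster credentials, the "openstack" namespace, error handling trimmed) watches the same namespace and sees the corresponding ADDED/MODIFIED/DELETED watch events; this is an illustration of the event source, not kubelet code:

// watchpods.go - observe pod watch events like the SyncLoop entries above.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes running inside the cluster
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	w, err := client.CoreV1().Pods("openstack").Watch(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	for ev := range w.ResultChan() {
		// ev.Type is ADDED/MODIFIED/DELETED, mirroring SyncLoop ADD/UPDATE/DELETE.
		fmt.Println(ev.Type)
	}
}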
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.670110 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-z7x2p"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.670379 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.681557 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-qvhpm"]
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.728782 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7f64cf65df-8hgb4"]
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735103 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-combined-ca-bundle\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735157 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cptqw\" (UniqueName: \"kubernetes.io/projected/e4bd742d-a7a1-402b-b1fa-9dde10e15952-kube-api-access-cptqw\") pod \"neutron-db-sync-4jnwp\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") " pod="openstack/neutron-db-sync-4jnwp"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735210 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-scripts\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735246 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-config\") pod \"neutron-db-sync-4jnwp\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") " pod="openstack/neutron-db-sync-4jnwp"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735288 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-config-data\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735328 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/29ff644c-aef6-4092-9dcf-1b4562e662d4-etc-machine-id\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735350 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-horizon-secret-key\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735372 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-scripts\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735394 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-config-data\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735414 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-db-sync-config-data\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735431 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-combined-ca-bundle\") pod \"neutron-db-sync-4jnwp\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") " pod="openstack/neutron-db-sync-4jnwp"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735473 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-logs\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735497 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ss7j\" (UniqueName: \"kubernetes.io/projected/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-kube-api-access-8ss7j\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.735534 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4j42\" (UniqueName: \"kubernetes.io/projected/29ff644c-aef6-4092-9dcf-1b4562e662d4-kube-api-access-x4j42\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.736352 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-scripts\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.738502 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-config-data\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.744141 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-logs\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.744936 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f64cf65df-8hgb4"]
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.745084 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.752003 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-horizon-secret-key\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.809245 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ss7j\" (UniqueName: \"kubernetes.io/projected/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-kube-api-access-8ss7j\") pod \"horizon-797fd4697-lsphq\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") " pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838604 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-config-data\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838650 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-db-sync-config-data\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838672 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-combined-ca-bundle\") pod \"neutron-db-sync-4jnwp\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") " pod="openstack/neutron-db-sync-4jnwp"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838707 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjbwv\" (UniqueName: \"kubernetes.io/projected/09239709-f618-437f-a720-070aff572294-kube-api-access-pjbwv\") pod \"barbican-db-sync-qvhpm\" (UID: \"09239709-f618-437f-a720-070aff572294\") " pod="openstack/barbican-db-sync-qvhpm"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838749 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-logs\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838774 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4j42\" (UniqueName: \"kubernetes.io/projected/29ff644c-aef6-4092-9dcf-1b4562e662d4-kube-api-access-x4j42\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv"
\"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838798 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-combined-ca-bundle\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838824 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cptqw\" (UniqueName: \"kubernetes.io/projected/e4bd742d-a7a1-402b-b1fa-9dde10e15952-kube-api-access-cptqw\") pod \"neutron-db-sync-4jnwp\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") " pod="openstack/neutron-db-sync-4jnwp" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838848 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-db-sync-config-data\") pod \"barbican-db-sync-qvhpm\" (UID: \"09239709-f618-437f-a720-070aff572294\") " pod="openstack/barbican-db-sync-qvhpm" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838865 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-combined-ca-bundle\") pod \"barbican-db-sync-qvhpm\" (UID: \"09239709-f618-437f-a720-070aff572294\") " pod="openstack/barbican-db-sync-qvhpm" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838916 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-config\") pod \"neutron-db-sync-4jnwp\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") " pod="openstack/neutron-db-sync-4jnwp" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838960 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/29ff644c-aef6-4092-9dcf-1b4562e662d4-etc-machine-id\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.838982 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-config-data\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.839000 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbqvg\" (UniqueName: \"kubernetes.io/projected/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-kube-api-access-vbqvg\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.839018 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-horizon-secret-key\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " 
pod="openstack/horizon-7f64cf65df-8hgb4" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.839033 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-scripts\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.839050 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-scripts\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.848668 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-db-sync-config-data\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.851709 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/29ff644c-aef6-4092-9dcf-1b4562e662d4-etc-machine-id\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.859222 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-config\") pod \"neutron-db-sync-4jnwp\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") " pod="openstack/neutron-db-sync-4jnwp" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.867346 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-combined-ca-bundle\") pod \"neutron-db-sync-4jnwp\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") " pod="openstack/neutron-db-sync-4jnwp" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.867563 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-config-data\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.867877 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-scripts\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.868324 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-combined-ca-bundle\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.886342 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.888135 4691 
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.888966 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-8g8rz"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.890155 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.910555 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.913978 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cptqw\" (UniqueName: \"kubernetes.io/projected/e4bd742d-a7a1-402b-b1fa-9dde10e15952-kube-api-access-cptqw\") pod \"neutron-db-sync-4jnwp\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") " pod="openstack/neutron-db-sync-4jnwp"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.914764 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.943583 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.944566 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-db-sync-config-data\") pod \"barbican-db-sync-qvhpm\" (UID: \"09239709-f618-437f-a720-070aff572294\") " pod="openstack/barbican-db-sync-qvhpm"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.944593 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-combined-ca-bundle\") pod \"barbican-db-sync-qvhpm\" (UID: \"09239709-f618-437f-a720-070aff572294\") " pod="openstack/barbican-db-sync-qvhpm"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.944665 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-config-data\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.944683 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbqvg\" (UniqueName: \"kubernetes.io/projected/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-kube-api-access-vbqvg\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.944701 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-horizon-secret-key\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.944722 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-scripts\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.944766 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjbwv\" (UniqueName: \"kubernetes.io/projected/09239709-f618-437f-a720-070aff572294-kube-api-access-pjbwv\") pod \"barbican-db-sync-qvhpm\" (UID: \"09239709-f618-437f-a720-070aff572294\") " pod="openstack/barbican-db-sync-qvhpm"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.944805 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-logs\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.945188 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-logs\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.947391 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.947659 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.947806 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.947941 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.949201 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-8g8rz"]
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.950759 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-config-data\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.951479 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-scripts\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.968183 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.968416 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.969470 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.969758 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-n869k"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.969908 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-jd5g4"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.990150 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:14 crc kubenswrapper[4691]: I1124 08:13:14.990627 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-vqnvn"]
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.015052 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.033192 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-54t6g"]
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.035328 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.038376 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-combined-ca-bundle\") pod \"barbican-db-sync-qvhpm\" (UID: \"09239709-f618-437f-a720-070aff572294\") " pod="openstack/barbican-db-sync-qvhpm"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.043622 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4j42\" (UniqueName: \"kubernetes.io/projected/29ff644c-aef6-4092-9dcf-1b4562e662d4-kube-api-access-x4j42\") pod \"cinder-db-sync-vsrzv\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " pod="openstack/cinder-db-sync-vsrzv"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.048837 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.048900 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-scripts\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.048933 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.048996 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049027 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-config-data\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz"
\"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049063 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/faf5645f-a25c-4bde-9769-51e1681b7eba-logs\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049088 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-scripts\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049122 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mf8r9\" (UniqueName: \"kubernetes.io/projected/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-kube-api-access-mf8r9\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049160 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vl5j\" (UniqueName: \"kubernetes.io/projected/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-kube-api-access-2vl5j\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049186 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wf86m\" (UniqueName: \"kubernetes.io/projected/faf5645f-a25c-4bde-9769-51e1681b7eba-kube-api-access-wf86m\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049213 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-logs\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049244 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-scripts\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049265 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-log-httpd\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049288 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " 
pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049320 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-config-data\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049351 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-config-data\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049375 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-combined-ca-bundle\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049407 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-run-httpd\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049431 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.049454 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.050030 4691 util.go:30] "No sandbox for pod can be found. 
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.053887 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-54t6g"]
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.082802 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjbwv\" (UniqueName: \"kubernetes.io/projected/09239709-f618-437f-a720-070aff572294-kube-api-access-pjbwv\") pod \"barbican-db-sync-qvhpm\" (UID: \"09239709-f618-437f-a720-070aff572294\") " pod="openstack/barbican-db-sync-qvhpm"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.083225 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-db-sync-config-data\") pod \"barbican-db-sync-qvhpm\" (UID: \"09239709-f618-437f-a720-070aff572294\") " pod="openstack/barbican-db-sync-qvhpm"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.087122 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbqvg\" (UniqueName: \"kubernetes.io/projected/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-kube-api-access-vbqvg\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.088018 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-horizon-secret-key\") pod \"horizon-7f64cf65df-8hgb4\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") " pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.103469 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.118011 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.120194 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.125044 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.125260 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.160555 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.181728 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.181794 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.181852 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-config-data\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.181882 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.181928 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-config-data\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.181966 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-combined-ca-bundle\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.182047 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-run-httpd\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0"
Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.182075 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-config\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g"
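[Annotation] Each volume passes through the same three logged phases: VerifyControllerAttachedVolume started (reconciler_common.go:245), MountVolume started (reconciler_common.go:218), and MountVolume.SetUp succeeded (operation_generator.go:637); the local PV below additionally logs MountVolume.MountDevice succeeded (operation_generator.go:580). A sketch, under the assumption of one-entry-per-line input on stdin, that pairs each "MountVolume started" with its "SetUp succeeded" by UniqueName and prints the elapsed time; the regexes match only the escaped-quote form used in this capture:

// mountlatency.go - rough per-volume mount latency from lines like the above.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"time"
)

var (
	tsRe      = regexp.MustCompile(`I\d{4} (\d{2}:\d{2}:\d{2}\.\d{6})`)
	startRe   = regexp.MustCompile(`MountVolume started for volume \\"([^"\\]+)\\" \(UniqueName: \\"([^"\\]+)\\"`)
	succeedRe = regexp.MustCompile(`MountVolume\.SetUp succeeded for volume \\"([^"\\]+)\\" \(UniqueName: \\"([^"\\]+)\\"`)
)

func main() {
	started := map[string]time.Time{} // UniqueName -> time "MountVolume started" was logged
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024) // entries can be long
	for sc.Scan() {
		line := sc.Text()
		ts := tsRe.FindStringSubmatch(line)
		if ts == nil {
			continue
		}
		t, _ := time.Parse("15:04:05.000000", ts[1])
		if m := startRe.FindStringSubmatch(line); m != nil {
			started[m[2]] = t
		} else if m := succeedRe.FindStringSubmatch(line); m != nil {
			if t0, ok := started[m[2]]; ok {
				fmt.Printf("%-25s %v\n", m[1], t.Sub(t0))
			}
		}
	}
}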
\"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.182113 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.182152 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.182181 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.182205 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-scripts\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.182238 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.198516 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-combined-ca-bundle\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199109 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199161 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199210 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-config-data\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199303 
4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/faf5645f-a25c-4bde-9769-51e1681b7eba-logs\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199331 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199362 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-scripts\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199408 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mf8r9\" (UniqueName: \"kubernetes.io/projected/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-kube-api-access-mf8r9\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199433 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86j9p\" (UniqueName: \"kubernetes.io/projected/70e09fe5-1d59-4585-a97c-0fac3f622b07-kube-api-access-86j9p\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199469 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vl5j\" (UniqueName: \"kubernetes.io/projected/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-kube-api-access-2vl5j\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199528 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wf86m\" (UniqueName: \"kubernetes.io/projected/faf5645f-a25c-4bde-9769-51e1681b7eba-kube-api-access-wf86m\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199588 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-logs\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199642 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-scripts\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.199669 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-log-httpd\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.201329 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-run-httpd\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.209082 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.217959 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.227026 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/faf5645f-a25c-4bde-9769-51e1681b7eba-logs\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.227355 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.227604 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-log-httpd\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.227955 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-logs\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.230202 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.296968 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mf8r9\" (UniqueName: \"kubernetes.io/projected/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-kube-api-access-mf8r9\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.297023 4691 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vl5j\" (UniqueName: \"kubernetes.io/projected/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-kube-api-access-2vl5j\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.296979 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-scripts\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.297317 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-config-data\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.297541 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.298067 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-scripts\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.297853 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.298829 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-scripts\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.299461 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-config-data\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.308419 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.308568 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2fp7\" (UniqueName: \"kubernetes.io/projected/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-kube-api-access-l2fp7\") pod \"glance-default-internal-api-0\" (UID: 
\"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.308612 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.308636 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.308674 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.308718 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86j9p\" (UniqueName: \"kubernetes.io/projected/70e09fe5-1d59-4585-a97c-0fac3f622b07-kube-api-access-86j9p\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.308933 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.308973 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.309083 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.309292 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.309330 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-scripts\") pod \"glance-default-internal-api-0\" (UID: 
\"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.309363 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.310011 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.310229 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-config\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.316392 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.323631 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-config-data\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.324149 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.327298 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.328458 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.327335 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.330186 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-config\") pod 
\"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.351673 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.370647 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wf86m\" (UniqueName: \"kubernetes.io/projected/faf5645f-a25c-4bde-9769-51e1681b7eba-kube-api-access-wf86m\") pod \"placement-db-sync-8g8rz\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") " pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.382122 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-qvhpm" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.388355 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86j9p\" (UniqueName: \"kubernetes.io/projected/70e09fe5-1d59-4585-a97c-0fac3f622b07-kube-api-access-86j9p\") pod \"dnsmasq-dns-56df8fb6b7-54t6g\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") " pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.404859 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.433024 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2fp7\" (UniqueName: \"kubernetes.io/projected/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-kube-api-access-l2fp7\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.433426 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.436135 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-8g8rz" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.436319 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.441074 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.441180 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.441685 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.441983 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.442072 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.442109 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.443745 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.463876 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-logs\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.464983 4691 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.465373 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.486666 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.487316 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.487986 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.487990 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2fp7\" (UniqueName: \"kubernetes.io/projected/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-kube-api-access-l2fp7\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.549432 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.570994 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.634296 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 08:13:15 crc kubenswrapper[4691]: I1124 08:13:15.779991 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-vqnvn"] Nov 24 08:13:16 crc kubenswrapper[4691]: I1124 08:13:16.014648 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" event={"ID":"8beacf4c-49bf-4497-9a54-0ba1239969b3","Type":"ContainerStarted","Data":"c97273aaa7b0a308f6a04dd429d8e4aee8816c656857ee5783c25923bedb11f5"} Nov 24 08:13:16 crc kubenswrapper[4691]: I1124 08:13:16.074306 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jtk2j"] Nov 24 08:13:16 crc kubenswrapper[4691]: I1124 08:13:16.394184 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f64cf65df-8hgb4"] Nov 24 08:13:16 crc kubenswrapper[4691]: W1124 08:13:16.405161 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb581c7d3_dc44_4505_b3f7_fc7aff32f5df.slice/crio-0f47ebfc3dd12985c7619527ea620401dd7e7620f52553cbeaf92f855c6124bd WatchSource:0}: Error finding container 0f47ebfc3dd12985c7619527ea620401dd7e7620f52553cbeaf92f855c6124bd: Status 404 returned error can't find the container with id 0f47ebfc3dd12985c7619527ea620401dd7e7620f52553cbeaf92f855c6124bd Nov 24 08:13:16 crc kubenswrapper[4691]: I1124 08:13:16.407660 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-797fd4697-lsphq"] Nov 24 08:13:16 crc kubenswrapper[4691]: I1124 08:13:16.438786 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-4jnwp"] Nov 24 08:13:16 crc kubenswrapper[4691]: I1124 08:13:16.522409 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-qvhpm"] Nov 24 08:13:16 crc kubenswrapper[4691]: I1124 08:13:16.536653 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:13:16 crc kubenswrapper[4691]: W1124 08:13:16.547278 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ad1b6c3_36a5_4991_b30f_092d3bf5018b.slice/crio-7c03c469829d19475adf34a4c97b6fd1ebd4b6ff86feb06771d62533c06cd67d WatchSource:0}: Error finding container 7c03c469829d19475adf34a4c97b6fd1ebd4b6ff86feb06771d62533c06cd67d: Status 404 returned error can't find the container with id 7c03c469829d19475adf34a4c97b6fd1ebd4b6ff86feb06771d62533c06cd67d Nov 24 08:13:16 crc kubenswrapper[4691]: I1124 08:13:16.628249 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:13:16 crc kubenswrapper[4691]: I1124 08:13:16.753216 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-54t6g"] Nov 24 08:13:16 crc kubenswrapper[4691]: W1124 08:13:16.777799 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29ff644c_aef6_4092_9dcf_1b4562e662d4.slice/crio-c09484fa238e48a2093c19dfa3052b745691097de7c8495bb8f96e6fb0e12576 WatchSource:0}: Error finding container c09484fa238e48a2093c19dfa3052b745691097de7c8495bb8f96e6fb0e12576: Status 404 returned error can't find the container with id c09484fa238e48a2093c19dfa3052b745691097de7c8495bb8f96e6fb0e12576 Nov 24 08:13:16 crc kubenswrapper[4691]: I1124 08:13:16.790850 4691 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/placement-db-sync-8g8rz"] Nov 24 08:13:16 crc kubenswrapper[4691]: I1124 08:13:16.793612 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-vsrzv"] Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.012218 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.033973 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" event={"ID":"70e09fe5-1d59-4585-a97c-0fac3f622b07","Type":"ContainerStarted","Data":"348dd04aa3da8c825c94bcb059dc6b82613762fc10866eab9fae798582c8c65f"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.037714 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4f7b8261-7f82-49d0-932a-7a3a6c3ba298","Type":"ContainerStarted","Data":"f7b985be0860ff458e4e0e8be7d956d9b9e8c0efdff81111084872fb39467339"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.040089 4691 generic.go:334] "Generic (PLEG): container finished" podID="8beacf4c-49bf-4497-9a54-0ba1239969b3" containerID="e136938532a96b77a76d61d533974a79784ee231d121bc6cbbb6536a4233f5e4" exitCode=0 Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.040146 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" event={"ID":"8beacf4c-49bf-4497-9a54-0ba1239969b3","Type":"ContainerDied","Data":"e136938532a96b77a76d61d533974a79784ee231d121bc6cbbb6536a4233f5e4"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.042767 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-797fd4697-lsphq" event={"ID":"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4","Type":"ContainerStarted","Data":"f6fc6c0b9fa191e8ac35a61f3c9da0c6c251b49d23fc0f8c8c062f449c6bf7a1"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.044152 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ad1b6c3-36a5-4991-b30f-092d3bf5018b","Type":"ContainerStarted","Data":"7c03c469829d19475adf34a4c97b6fd1ebd4b6ff86feb06771d62533c06cd67d"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.057691 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-qvhpm" event={"ID":"09239709-f618-437f-a720-070aff572294","Type":"ContainerStarted","Data":"b89cd7b6c28b3ca1e6bb31428ceb318dd3850a0c140d36323019e096ccc6e1c2"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.070821 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4jnwp" event={"ID":"e4bd742d-a7a1-402b-b1fa-9dde10e15952","Type":"ContainerStarted","Data":"3acae0443029415644f3322c8e8498e0a985411835b7279f62aa62e60b852762"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.070880 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4jnwp" event={"ID":"e4bd742d-a7a1-402b-b1fa-9dde10e15952","Type":"ContainerStarted","Data":"20e58f4db05b47b155a5154ea9a2264b693bc113d7bc334307061af03020ea08"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.076978 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vsrzv" event={"ID":"29ff644c-aef6-4092-9dcf-1b4562e662d4","Type":"ContainerStarted","Data":"c09484fa238e48a2093c19dfa3052b745691097de7c8495bb8f96e6fb0e12576"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.091784 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/neutron-db-sync-4jnwp" podStartSLOduration=3.091761261 podStartE2EDuration="3.091761261s" podCreationTimestamp="2025-11-24 08:13:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:17.09002716 +0000 UTC m=+959.088976409" watchObservedRunningTime="2025-11-24 08:13:17.091761261 +0000 UTC m=+959.090710510" Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.110818 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-8g8rz" event={"ID":"faf5645f-a25c-4bde-9769-51e1681b7eba","Type":"ContainerStarted","Data":"9334e8165dca42cd8a11e62bdc02a5dca97951253da38efa802c0f7707382626"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.115594 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f64cf65df-8hgb4" event={"ID":"b581c7d3-dc44-4505-b3f7-fc7aff32f5df","Type":"ContainerStarted","Data":"0f47ebfc3dd12985c7619527ea620401dd7e7620f52553cbeaf92f855c6124bd"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.121346 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jtk2j" event={"ID":"93da1283-17cd-4b15-a4dd-db78d80c187e","Type":"ContainerStarted","Data":"eccd8ce2de7bf41e77a7fbe034d937c8020882af501290d4e72451ca8383d5f1"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.121383 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jtk2j" event={"ID":"93da1283-17cd-4b15-a4dd-db78d80c187e","Type":"ContainerStarted","Data":"1ce455d41ea62c0166001b1c706999bc5c43e27c1e6ce2b323eded71770da8bc"} Nov 24 08:13:17 crc kubenswrapper[4691]: I1124 08:13:17.161555 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-jtk2j" podStartSLOduration=3.161535206 podStartE2EDuration="3.161535206s" podCreationTimestamp="2025-11-24 08:13:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:17.143269165 +0000 UTC m=+959.142218414" watchObservedRunningTime="2025-11-24 08:13:17.161535206 +0000 UTC m=+959.160484455" Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.009086 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.043710 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-797fd4697-lsphq"] Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.072253 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-658647c585-2tj4h"] Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.073734 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-658647c585-2tj4h" Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.099267 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-658647c585-2tj4h"] Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.163498 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.200795 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.214251 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4f7b8261-7f82-49d0-932a-7a3a6c3ba298","Type":"ContainerStarted","Data":"3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7"} Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.218159 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a","Type":"ContainerStarted","Data":"7ab634270cd255930ad61580c20c706419798f6223b9f514df6bde229e9a9909"} Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.224676 4691 generic.go:334] "Generic (PLEG): container finished" podID="70e09fe5-1d59-4585-a97c-0fac3f622b07" containerID="e49eaeb23f880e45af73af17652fcc892ae3e7db8c73c79bbe6f0c48adf99b99" exitCode=0 Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.224814 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" event={"ID":"70e09fe5-1d59-4585-a97c-0fac3f622b07","Type":"ContainerDied","Data":"e49eaeb23f880e45af73af17652fcc892ae3e7db8c73c79bbe6f0c48adf99b99"} Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.244104 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.254735 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fv6ph\" (UniqueName: \"kubernetes.io/projected/3a983467-b9da-4795-8cae-f645d8e316b4-kube-api-access-fv6ph\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h" Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.254844 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-config-data\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h" Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.254894 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-scripts\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h" Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.254925 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a983467-b9da-4795-8cae-f645d8e316b4-logs\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h" Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.275987 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3a983467-b9da-4795-8cae-f645d8e316b4-horizon-secret-key\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h" Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.379598 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-dns-swift-storage-0\") pod \"8beacf4c-49bf-4497-9a54-0ba1239969b3\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.379863 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-config\") pod \"8beacf4c-49bf-4497-9a54-0ba1239969b3\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.379939 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-nb\") pod \"8beacf4c-49bf-4497-9a54-0ba1239969b3\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.380087 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6k2tk\" (UniqueName: \"kubernetes.io/projected/8beacf4c-49bf-4497-9a54-0ba1239969b3-kube-api-access-6k2tk\") pod \"8beacf4c-49bf-4497-9a54-0ba1239969b3\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") " Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.380344 4691 
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.380571 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-sb\") pod \"8beacf4c-49bf-4497-9a54-0ba1239969b3\" (UID: \"8beacf4c-49bf-4497-9a54-0ba1239969b3\") "
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.382346 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3a983467-b9da-4795-8cae-f645d8e316b4-horizon-secret-key\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.382532 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fv6ph\" (UniqueName: \"kubernetes.io/projected/3a983467-b9da-4795-8cae-f645d8e316b4-kube-api-access-fv6ph\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.382796 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-config-data\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.382925 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-scripts\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.383023 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a983467-b9da-4795-8cae-f645d8e316b4-logs\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.391050 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-config-data\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.391151 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a983467-b9da-4795-8cae-f645d8e316b4-logs\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.391676 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-scripts\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.397031 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8beacf4c-49bf-4497-9a54-0ba1239969b3-kube-api-access-6k2tk" (OuterVolumeSpecName: "kube-api-access-6k2tk") pod "8beacf4c-49bf-4497-9a54-0ba1239969b3" (UID: "8beacf4c-49bf-4497-9a54-0ba1239969b3"). InnerVolumeSpecName "kube-api-access-6k2tk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.409877 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3a983467-b9da-4795-8cae-f645d8e316b4-horizon-secret-key\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.420263 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fv6ph\" (UniqueName: \"kubernetes.io/projected/3a983467-b9da-4795-8cae-f645d8e316b4-kube-api-access-fv6ph\") pod \"horizon-658647c585-2tj4h\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.442635 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.443087 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-config" (OuterVolumeSpecName: "config") pod "8beacf4c-49bf-4497-9a54-0ba1239969b3" (UID: "8beacf4c-49bf-4497-9a54-0ba1239969b3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.458038 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8beacf4c-49bf-4497-9a54-0ba1239969b3" (UID: "8beacf4c-49bf-4497-9a54-0ba1239969b3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.481876 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8beacf4c-49bf-4497-9a54-0ba1239969b3" (UID: "8beacf4c-49bf-4497-9a54-0ba1239969b3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.485424 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-config\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.485465 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6k2tk\" (UniqueName: \"kubernetes.io/projected/8beacf4c-49bf-4497-9a54-0ba1239969b3-kube-api-access-6k2tk\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.485595 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.485605 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.491175 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8beacf4c-49bf-4497-9a54-0ba1239969b3" (UID: "8beacf4c-49bf-4497-9a54-0ba1239969b3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.501859 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8beacf4c-49bf-4497-9a54-0ba1239969b3" (UID: "8beacf4c-49bf-4497-9a54-0ba1239969b3"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.587386 4691 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:18 crc kubenswrapper[4691]: I1124 08:13:18.587427 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8beacf4c-49bf-4497-9a54-0ba1239969b3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.204339 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-658647c585-2tj4h"]
Nov 24 08:13:19 crc kubenswrapper[4691]: W1124 08:13:19.223341 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a983467_b9da_4795_8cae_f645d8e316b4.slice/crio-743b7405bb72b1859aa19f915019ac8f1dd48a72a098ef986ff57fcf286accd4 WatchSource:0}: Error finding container 743b7405bb72b1859aa19f915019ac8f1dd48a72a098ef986ff57fcf286accd4: Status 404 returned error can't find the container with id 743b7405bb72b1859aa19f915019ac8f1dd48a72a098ef986ff57fcf286accd4
Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.248797 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-658647c585-2tj4h" event={"ID":"3a983467-b9da-4795-8cae-f645d8e316b4","Type":"ContainerStarted","Data":"743b7405bb72b1859aa19f915019ac8f1dd48a72a098ef986ff57fcf286accd4"}
Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.256847 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a","Type":"ContainerStarted","Data":"0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131"}
Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.264267 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" event={"ID":"70e09fe5-1d59-4585-a97c-0fac3f622b07","Type":"ContainerStarted","Data":"e519ccf8e2d5980411184dae79d01bd5ddc9ed046bc2227cb27ec45e4040059e"}
Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.264396 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g"
Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.272258 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4f7b8261-7f82-49d0-932a-7a3a6c3ba298" containerName="glance-log" containerID="cri-o://3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7" gracePeriod=30
Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.272307 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4f7b8261-7f82-49d0-932a-7a3a6c3ba298" containerName="glance-httpd" containerID="cri-o://d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168" gracePeriod=30
Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.275292 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" event={"ID":"8beacf4c-49bf-4497-9a54-0ba1239969b3","Type":"ContainerDied","Data":"c97273aaa7b0a308f6a04dd429d8e4aee8816c656857ee5783c25923bedb11f5"}
Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.275348 4691 scope.go:117] "RemoveContainer" containerID="e136938532a96b77a76d61d533974a79784ee231d121bc6cbbb6536a4233f5e4"
containerID="e136938532a96b77a76d61d533974a79784ee231d121bc6cbbb6536a4233f5e4" Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.275582 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bbf5cc879-vqnvn" Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.294592 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" podStartSLOduration=5.294566682 podStartE2EDuration="5.294566682s" podCreationTimestamp="2025-11-24 08:13:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:19.288631309 +0000 UTC m=+961.287580558" watchObservedRunningTime="2025-11-24 08:13:19.294566682 +0000 UTC m=+961.293515931" Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.356717 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-vqnvn"] Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.374384 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.374348057 podStartE2EDuration="5.374348057s" podCreationTimestamp="2025-11-24 08:13:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:19.343550443 +0000 UTC m=+961.342499692" watchObservedRunningTime="2025-11-24 08:13:19.374348057 +0000 UTC m=+961.373297306" Nov 24 08:13:19 crc kubenswrapper[4691]: I1124 08:13:19.374736 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bbf5cc879-vqnvn"] Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.254106 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.307525 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-combined-ca-bundle\") pod \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.307626 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-logs\") pod \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.307654 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-scripts\") pod \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.307726 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.308020 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-public-tls-certs\") pod \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.308052 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vl5j\" (UniqueName: \"kubernetes.io/projected/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-kube-api-access-2vl5j\") pod \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.308127 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-httpd-run\") pod \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.308154 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-config-data\") pod \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\" (UID: \"4f7b8261-7f82-49d0-932a-7a3a6c3ba298\") " Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.309050 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-logs" (OuterVolumeSpecName: "logs") pod "4f7b8261-7f82-49d0-932a-7a3a6c3ba298" (UID: "4f7b8261-7f82-49d0-932a-7a3a6c3ba298"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.311211 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4f7b8261-7f82-49d0-932a-7a3a6c3ba298" (UID: "4f7b8261-7f82-49d0-932a-7a3a6c3ba298"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.320887 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "4f7b8261-7f82-49d0-932a-7a3a6c3ba298" (UID: "4f7b8261-7f82-49d0-932a-7a3a6c3ba298"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.321367 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a","Type":"ContainerStarted","Data":"f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d"} Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.321987 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" containerName="glance-log" containerID="cri-o://0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131" gracePeriod=30 Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.322027 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" containerName="glance-httpd" containerID="cri-o://f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d" gracePeriod=30 Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.326438 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-scripts" (OuterVolumeSpecName: "scripts") pod "4f7b8261-7f82-49d0-932a-7a3a6c3ba298" (UID: "4f7b8261-7f82-49d0-932a-7a3a6c3ba298"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.330967 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-kube-api-access-2vl5j" (OuterVolumeSpecName: "kube-api-access-2vl5j") pod "4f7b8261-7f82-49d0-932a-7a3a6c3ba298" (UID: "4f7b8261-7f82-49d0-932a-7a3a6c3ba298"). InnerVolumeSpecName "kube-api-access-2vl5j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.332679 4691 generic.go:334] "Generic (PLEG): container finished" podID="4f7b8261-7f82-49d0-932a-7a3a6c3ba298" containerID="d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168" exitCode=143 Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.332723 4691 generic.go:334] "Generic (PLEG): container finished" podID="4f7b8261-7f82-49d0-932a-7a3a6c3ba298" containerID="3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7" exitCode=143 Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.332989 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4f7b8261-7f82-49d0-932a-7a3a6c3ba298","Type":"ContainerDied","Data":"d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168"} Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.333098 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.333117 4691 scope.go:117] "RemoveContainer" containerID="d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.333102 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4f7b8261-7f82-49d0-932a-7a3a6c3ba298","Type":"ContainerDied","Data":"3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7"} Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.333315 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4f7b8261-7f82-49d0-932a-7a3a6c3ba298","Type":"ContainerDied","Data":"f7b985be0860ff458e4e0e8be7d956d9b9e8c0efdff81111084872fb39467339"} Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.352258 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.352229797 podStartE2EDuration="5.352229797s" podCreationTimestamp="2025-11-24 08:13:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:20.347361646 +0000 UTC m=+962.346310905" watchObservedRunningTime="2025-11-24 08:13:20.352229797 +0000 UTC m=+962.351179046" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.366421 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f7b8261-7f82-49d0-932a-7a3a6c3ba298" (UID: "4f7b8261-7f82-49d0-932a-7a3a6c3ba298"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.403553 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4f7b8261-7f82-49d0-932a-7a3a6c3ba298" (UID: "4f7b8261-7f82-49d0-932a-7a3a6c3ba298"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.413372 4691 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.413430 4691 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.413483 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vl5j\" (UniqueName: \"kubernetes.io/projected/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-kube-api-access-2vl5j\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.413492 4691 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.413502 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.413511 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.413519 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.413727 4691 scope.go:117] "RemoveContainer" containerID="3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.419245 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-config-data" (OuterVolumeSpecName: "config-data") pod "4f7b8261-7f82-49d0-932a-7a3a6c3ba298" (UID: "4f7b8261-7f82-49d0-932a-7a3a6c3ba298"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.436702 4691 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.514990 4691 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.515028 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f7b8261-7f82-49d0-932a-7a3a6c3ba298-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.626048 4691 scope.go:117] "RemoveContainer" containerID="d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168" Nov 24 08:13:20 crc kubenswrapper[4691]: E1124 08:13:20.627004 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168\": container with ID starting with d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168 not found: ID does not exist" containerID="d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.627050 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168"} err="failed to get container status \"d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168\": rpc error: code = NotFound desc = could not find container \"d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168\": container with ID starting with d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168 not found: ID does not exist" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.627089 4691 scope.go:117] "RemoveContainer" containerID="3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7" Nov 24 08:13:20 crc kubenswrapper[4691]: E1124 08:13:20.627772 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7\": container with ID starting with 3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7 not found: ID does not exist" containerID="3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.627827 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7"} err="failed to get container status \"3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7\": rpc error: code = NotFound desc = could not find container \"3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7\": container with ID starting with 3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7 not found: ID does not exist" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.627888 4691 scope.go:117] "RemoveContainer" containerID="d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.631651 4691 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168"} err="failed to get container status \"d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168\": rpc error: code = NotFound desc = could not find container \"d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168\": container with ID starting with d1214b8c7dcd661729ab5f6eb154bb4d1d9594c71856fdccae71ff5610970168 not found: ID does not exist" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.631697 4691 scope.go:117] "RemoveContainer" containerID="3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.635637 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7"} err="failed to get container status \"3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7\": rpc error: code = NotFound desc = could not find container \"3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7\": container with ID starting with 3f699a45f34a7c4ca37c6b0d9a38d3497f1e3dad6b23c735b0893c53fd3c6ef7 not found: ID does not exist" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.717506 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.742762 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.753319 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:13:20 crc kubenswrapper[4691]: E1124 08:13:20.755733 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f7b8261-7f82-49d0-932a-7a3a6c3ba298" containerName="glance-log" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.755760 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f7b8261-7f82-49d0-932a-7a3a6c3ba298" containerName="glance-log" Nov 24 08:13:20 crc kubenswrapper[4691]: E1124 08:13:20.755775 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f7b8261-7f82-49d0-932a-7a3a6c3ba298" containerName="glance-httpd" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.755783 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f7b8261-7f82-49d0-932a-7a3a6c3ba298" containerName="glance-httpd" Nov 24 08:13:20 crc kubenswrapper[4691]: E1124 08:13:20.755806 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8beacf4c-49bf-4497-9a54-0ba1239969b3" containerName="init" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.755812 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8beacf4c-49bf-4497-9a54-0ba1239969b3" containerName="init" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.756142 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f7b8261-7f82-49d0-932a-7a3a6c3ba298" containerName="glance-httpd" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.756158 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f7b8261-7f82-49d0-932a-7a3a6c3ba298" containerName="glance-log" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.756231 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="8beacf4c-49bf-4497-9a54-0ba1239969b3" containerName="init" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.758320 4691 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.771081 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.792185 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.842046 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.842193 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-scripts\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.842234 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-logs\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.842820 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkvck\" (UniqueName: \"kubernetes.io/projected/998605ae-db3f-4eb2-9345-d5d940b82461-kube-api-access-qkvck\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.843009 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-config-data\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.843114 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.843195 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.843357 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.846511 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f7b8261-7f82-49d0-932a-7a3a6c3ba298" path="/var/lib/kubelet/pods/4f7b8261-7f82-49d0-932a-7a3a6c3ba298/volumes" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.850199 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8beacf4c-49bf-4497-9a54-0ba1239969b3" path="/var/lib/kubelet/pods/8beacf4c-49bf-4497-9a54-0ba1239969b3/volumes" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.850890 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.964351 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.964439 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.964511 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.964580 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.964605 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-scripts\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.964624 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-logs\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.964687 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkvck\" (UniqueName: \"kubernetes.io/projected/998605ae-db3f-4eb2-9345-d5d940b82461-kube-api-access-qkvck\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " 
pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.964720 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-config-data\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.969149 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-logs\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.974215 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.987848 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.993973 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:20 crc kubenswrapper[4691]: I1124 08:13:20.996523 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:20.998583 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-scripts\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.010972 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-config-data\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.027435 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkvck\" (UniqueName: \"kubernetes.io/projected/998605ae-db3f-4eb2-9345-d5d940b82461-kube-api-access-qkvck\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.094402 4691 
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.094529 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.172225 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") " pod="openstack/glance-default-external-api-0"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.281318 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.366973 4691 generic.go:334] "Generic (PLEG): container finished" podID="6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" containerID="f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d" exitCode=143
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.367609 4691 generic.go:334] "Generic (PLEG): container finished" podID="6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" containerID="0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131" exitCode=143
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.367081 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.367089 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a","Type":"ContainerDied","Data":"f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d"}
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.367798 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a","Type":"ContainerDied","Data":"0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131"}
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.367815 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a","Type":"ContainerDied","Data":"7ab634270cd255930ad61580c20c706419798f6223b9f514df6bde229e9a9909"}
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.367835 4691 scope.go:117] "RemoveContainer" containerID="f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.395376 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-config-data\") pod \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") "
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.395468 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") "
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.395604 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-scripts\") pod \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") "
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.395643 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-internal-tls-certs\") pod \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") "
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.395689 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2fp7\" (UniqueName: \"kubernetes.io/projected/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-kube-api-access-l2fp7\") pod \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") "
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.395730 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-httpd-run\") pod \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") "
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.395757 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-logs\") pod \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") "
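The machine-config-daemon liveness probe above failed with "connection refused", i.e. nothing was listening on 127.0.0.1:8798 at probe time. The kubelet's HTTP prober is essentially a GET that treats any status in [200, 400) as success; a few lines of Python reproduce the same check against the endpoint taken from the record (a sketch, not the kubelet's actual client):

    from urllib.request import urlopen
    from urllib.error import URLError

    # Same check the kubelet HTTP prober performs; statuses in [200, 400) pass.
    try:
        with urlopen("http://127.0.0.1:8798/health", timeout=1) as resp:
            ok = 200 <= resp.status < 400
            print("probe success" if ok else f"probe failure: HTTP {resp.status}")
    except URLError as err:
        print(f"probe failure: {err.reason}")  # here: [Errno 111] Connection refused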
\"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.395863 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-combined-ca-bundle\") pod \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\" (UID: \"6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a\") " Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.398517 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-logs" (OuterVolumeSpecName: "logs") pod "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" (UID: "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.399122 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" (UID: "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.402247 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-scripts" (OuterVolumeSpecName: "scripts") pod "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" (UID: "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.403847 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-kube-api-access-l2fp7" (OuterVolumeSpecName: "kube-api-access-l2fp7") pod "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" (UID: "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a"). InnerVolumeSpecName "kube-api-access-l2fp7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.403952 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" (UID: "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.413699 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.440387 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" (UID: "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.463215 4691 scope.go:117] "RemoveContainer" containerID="0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.469774 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-config-data" (OuterVolumeSpecName: "config-data") pod "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" (UID: "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.479504 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" (UID: "6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.503138 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.503222 4691 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.503240 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.503252 4691 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.503268 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2fp7\" (UniqueName: \"kubernetes.io/projected/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-kube-api-access-l2fp7\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.503280 4691 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.503290 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.503300 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.527497 4691 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.605359 4691 reconciler_common.go:293] "Volume detached for 
volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.618062 4691 scope.go:117] "RemoveContainer" containerID="f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d" Nov 24 08:13:21 crc kubenswrapper[4691]: E1124 08:13:21.619403 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d\": container with ID starting with f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d not found: ID does not exist" containerID="f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.619469 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d"} err="failed to get container status \"f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d\": rpc error: code = NotFound desc = could not find container \"f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d\": container with ID starting with f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d not found: ID does not exist" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.619498 4691 scope.go:117] "RemoveContainer" containerID="0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131" Nov 24 08:13:21 crc kubenswrapper[4691]: E1124 08:13:21.620194 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131\": container with ID starting with 0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131 not found: ID does not exist" containerID="0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.620220 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131"} err="failed to get container status \"0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131\": rpc error: code = NotFound desc = could not find container \"0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131\": container with ID starting with 0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131 not found: ID does not exist" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.620241 4691 scope.go:117] "RemoveContainer" containerID="f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.620996 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d"} err="failed to get container status \"f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d\": rpc error: code = NotFound desc = could not find container \"f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d\": container with ID starting with f14a45cc801cd88ea97987e654ce842d7490529de657dca83406b36cfadd237d not found: ID does not exist" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.621061 4691 scope.go:117] "RemoveContainer" containerID="0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131" 
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.621483 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131"} err="failed to get container status \"0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131\": rpc error: code = NotFound desc = could not find container \"0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131\": container with ID starting with 0e0d8784ebec3c3ae5c188d680642a6c1ad08794631905b74557263f7041e131 not found: ID does not exist" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.728567 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.757398 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.776557 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:13:21 crc kubenswrapper[4691]: E1124 08:13:21.779220 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" containerName="glance-httpd" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.779267 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" containerName="glance-httpd" Nov 24 08:13:21 crc kubenswrapper[4691]: E1124 08:13:21.779350 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" containerName="glance-log" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.779362 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" containerName="glance-log" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.779815 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" containerName="glance-httpd" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.779893 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" containerName="glance-log" Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.782418 4691 util.go:30] "No sandbox for pod can be found. 
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.786195 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.786691 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.786805 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.914884 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.914978 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.915023 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.915083 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.915531 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnppg\" (UniqueName: \"kubernetes.io/projected/9f9499a7-a024-469f-a238-e4e3a60b4c9e-kube-api-access-rnppg\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.915706 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.915962 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:21 crc kubenswrapper[4691]: I1124 08:13:21.916211 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-logs\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.018961 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.019028 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.019078 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.019147 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnppg\" (UniqueName: \"kubernetes.io/projected/9f9499a7-a024-469f-a238-e4e3a60b4c9e-kube-api-access-rnppg\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.019195 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.019222 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.019303 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-logs\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.019383 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.020405 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0"
\"kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.020577 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.020687 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-logs\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.028230 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.030358 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.030684 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.032353 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.039890 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnppg\" (UniqueName: \"kubernetes.io/projected/9f9499a7-a024-469f-a238-e4e3a60b4c9e-kube-api-access-rnppg\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.073880 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.087203 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.105068 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.399737 4691 generic.go:334] "Generic (PLEG): container finished" podID="93da1283-17cd-4b15-a4dd-db78d80c187e" containerID="eccd8ce2de7bf41e77a7fbe034d937c8020882af501290d4e72451ca8383d5f1" exitCode=0 Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.399825 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jtk2j" event={"ID":"93da1283-17cd-4b15-a4dd-db78d80c187e","Type":"ContainerDied","Data":"eccd8ce2de7bf41e77a7fbe034d937c8020882af501290d4e72451ca8383d5f1"} Nov 24 08:13:22 crc kubenswrapper[4691]: I1124 08:13:22.799922 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a" path="/var/lib/kubelet/pods/6c9e8eca-5ec9-4bbc-b1a4-5246d932eb2a/volumes" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.704556 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f64cf65df-8hgb4"] Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.756424 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-77477f4d7b-kclfz"] Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.758449 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.761127 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.780341 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-77477f4d7b-kclfz"] Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.791940 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.847847 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-658647c585-2tj4h"] Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.867598 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5fb4677cdd-69rb6"] Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.869537 4691 util.go:30] "No sandbox for pod can be found. 
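The horizon entries record a Deployment rollout: pods from the old ReplicaSets (horizon-7f64cf65df-8hgb4, horizon-658647c585-2tj4h) are DELETEd while replacements with new pod-template hashes (horizon-77477f4d7b-kclfz, horizon-5fb4677cdd-69rb6) are ADDed within the same second. A sketch that surfaces such rollouts from the SyncLoop records follows; treating the pod name as <deployment>-<pod-template-hash>-<suffix> is a heuristic (it holds for Deployment-owned pods, while StatefulSet pods like glance-default-external-api-0 are filtered out by the hash comparison):

    import re
    import sys
    from collections import defaultdict

    SYNC = re.compile(r'"SyncLoop (ADD|DELETE)" source="api" pods=\["([^"]+)"\]')

    events = defaultdict(lambda: {"ADD": set(), "DELETE": set()})
    for line in sys.stdin:
        if (m := SYNC.search(line)):
            op, pod = m.groups()
            ns, name = pod.split("/", 1)
            parts = name.rsplit("-", 2)        # <deployment>-<pod-template-hash>-<suffix>
            if len(parts) == 3:
                events[(ns, parts[0])][op].add(parts[1])

    # Report only groups whose ADD and DELETE hash sets differ, i.e. genuine
    # template changes rather than same-name restarts.
    for (ns, deploy), ops in sorted(events.items()):
        if ops["ADD"] and ops["DELETE"] and ops["ADD"] != ops["DELETE"]:
            print(f"{ns}/{deploy}: {sorted(ops['DELETE'])} -> {sorted(ops['ADD'])}")

Run over this section of the log, that prints one line for openstack/horizon mapping the two old hashes to the two new ones.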
Need to start a new one" pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.874279 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-scripts\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.874373 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-secret-key\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.874410 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-tls-certs\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.874448 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npn47\" (UniqueName: \"kubernetes.io/projected/567ed4cd-aaf3-4e52-be70-2f723075d545-kube-api-access-npn47\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.874493 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/567ed4cd-aaf3-4e52-be70-2f723075d545-logs\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.874521 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-combined-ca-bundle\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.874544 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-config-data\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.882126 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.896773 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fb4677cdd-69rb6"] Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977122 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5f7435d6-aa83-41a0-b392-b06d77f53aa2-horizon-secret-key\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " 
pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977187 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/567ed4cd-aaf3-4e52-be70-2f723075d545-logs\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977251 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-combined-ca-bundle\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977303 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-config-data\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977535 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kq2x\" (UniqueName: \"kubernetes.io/projected/5f7435d6-aa83-41a0-b392-b06d77f53aa2-kube-api-access-6kq2x\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977582 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-scripts\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977625 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f7435d6-aa83-41a0-b392-b06d77f53aa2-horizon-tls-certs\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977656 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5f7435d6-aa83-41a0-b392-b06d77f53aa2-scripts\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977795 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-secret-key\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977832 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f7435d6-aa83-41a0-b392-b06d77f53aa2-combined-ca-bundle\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:23 crc 
kubenswrapper[4691]: I1124 08:13:23.977844 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/567ed4cd-aaf3-4e52-be70-2f723075d545-logs\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977877 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-tls-certs\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.977959 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f7435d6-aa83-41a0-b392-b06d77f53aa2-logs\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.978012 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f7435d6-aa83-41a0-b392-b06d77f53aa2-config-data\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.978051 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npn47\" (UniqueName: \"kubernetes.io/projected/567ed4cd-aaf3-4e52-be70-2f723075d545-kube-api-access-npn47\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.978966 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-scripts\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.980578 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-config-data\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.986759 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-combined-ca-bundle\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.987104 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-secret-key\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.992268 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-tls-certs\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:23 crc kubenswrapper[4691]: I1124 08:13:23.996868 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npn47\" (UniqueName: \"kubernetes.io/projected/567ed4cd-aaf3-4e52-be70-2f723075d545-kube-api-access-npn47\") pod \"horizon-77477f4d7b-kclfz\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.080739 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kq2x\" (UniqueName: \"kubernetes.io/projected/5f7435d6-aa83-41a0-b392-b06d77f53aa2-kube-api-access-6kq2x\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.081682 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f7435d6-aa83-41a0-b392-b06d77f53aa2-horizon-tls-certs\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.081828 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5f7435d6-aa83-41a0-b392-b06d77f53aa2-scripts\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.082014 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f7435d6-aa83-41a0-b392-b06d77f53aa2-combined-ca-bundle\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.082136 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f7435d6-aa83-41a0-b392-b06d77f53aa2-logs\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.082228 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f7435d6-aa83-41a0-b392-b06d77f53aa2-config-data\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.082333 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5f7435d6-aa83-41a0-b392-b06d77f53aa2-horizon-secret-key\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.083547 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f7435d6-aa83-41a0-b392-b06d77f53aa2-logs\") pod \"horizon-5fb4677cdd-69rb6\" (UID: 
\"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.084142 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5f7435d6-aa83-41a0-b392-b06d77f53aa2-scripts\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.085058 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5f7435d6-aa83-41a0-b392-b06d77f53aa2-config-data\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.088002 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f7435d6-aa83-41a0-b392-b06d77f53aa2-horizon-tls-certs\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.089000 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/5f7435d6-aa83-41a0-b392-b06d77f53aa2-horizon-secret-key\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.089310 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f7435d6-aa83-41a0-b392-b06d77f53aa2-combined-ca-bundle\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.095880 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.100807 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kq2x\" (UniqueName: \"kubernetes.io/projected/5f7435d6-aa83-41a0-b392-b06d77f53aa2-kube-api-access-6kq2x\") pod \"horizon-5fb4677cdd-69rb6\" (UID: \"5f7435d6-aa83-41a0-b392-b06d77f53aa2\") " pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.212511 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.735177 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.800716 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-credential-keys\") pod \"93da1283-17cd-4b15-a4dd-db78d80c187e\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.800795 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-scripts\") pod \"93da1283-17cd-4b15-a4dd-db78d80c187e\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.800865 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-combined-ca-bundle\") pod \"93da1283-17cd-4b15-a4dd-db78d80c187e\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.800894 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-config-data\") pod \"93da1283-17cd-4b15-a4dd-db78d80c187e\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.800926 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-fernet-keys\") pod \"93da1283-17cd-4b15-a4dd-db78d80c187e\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.800971 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62bgf\" (UniqueName: \"kubernetes.io/projected/93da1283-17cd-4b15-a4dd-db78d80c187e-kube-api-access-62bgf\") pod \"93da1283-17cd-4b15-a4dd-db78d80c187e\" (UID: \"93da1283-17cd-4b15-a4dd-db78d80c187e\") " Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.806950 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-scripts" (OuterVolumeSpecName: "scripts") pod "93da1283-17cd-4b15-a4dd-db78d80c187e" (UID: "93da1283-17cd-4b15-a4dd-db78d80c187e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.808791 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "93da1283-17cd-4b15-a4dd-db78d80c187e" (UID: "93da1283-17cd-4b15-a4dd-db78d80c187e"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.808846 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "93da1283-17cd-4b15-a4dd-db78d80c187e" (UID: "93da1283-17cd-4b15-a4dd-db78d80c187e"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.809565 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93da1283-17cd-4b15-a4dd-db78d80c187e-kube-api-access-62bgf" (OuterVolumeSpecName: "kube-api-access-62bgf") pod "93da1283-17cd-4b15-a4dd-db78d80c187e" (UID: "93da1283-17cd-4b15-a4dd-db78d80c187e"). InnerVolumeSpecName "kube-api-access-62bgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.829546 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-config-data" (OuterVolumeSpecName: "config-data") pod "93da1283-17cd-4b15-a4dd-db78d80c187e" (UID: "93da1283-17cd-4b15-a4dd-db78d80c187e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.834118 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93da1283-17cd-4b15-a4dd-db78d80c187e" (UID: "93da1283-17cd-4b15-a4dd-db78d80c187e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.904247 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.904288 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.904319 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.904329 4691 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.904338 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62bgf\" (UniqueName: \"kubernetes.io/projected/93da1283-17cd-4b15-a4dd-db78d80c187e-kube-api-access-62bgf\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:24 crc kubenswrapper[4691]: I1124 08:13:24.904355 4691 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/93da1283-17cd-4b15-a4dd-db78d80c187e-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.452428 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"998605ae-db3f-4eb2-9345-d5d940b82461","Type":"ContainerStarted","Data":"37b4404b8d6dd52702c87d8b7026e9a6a9502ec9970a92e8a729c5adfb8f2766"} Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.458992 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jtk2j" 
event={"ID":"93da1283-17cd-4b15-a4dd-db78d80c187e","Type":"ContainerDied","Data":"1ce455d41ea62c0166001b1c706999bc5c43e27c1e6ce2b323eded71770da8bc"} Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.459046 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ce455d41ea62c0166001b1c706999bc5c43e27c1e6ce2b323eded71770da8bc" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.459061 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jtk2j" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.551720 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.624520 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-2vnf5"] Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.624747 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" podUID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerName="dnsmasq-dns" containerID="cri-o://2a2a2562c36e38706ab1f8efc94201056f0d5cc564e742fbabbe272807054562" gracePeriod=10 Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.829923 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-jtk2j"] Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.843839 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-jtk2j"] Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.940402 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-tx7zx"] Nov 24 08:13:25 crc kubenswrapper[4691]: E1124 08:13:25.941073 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93da1283-17cd-4b15-a4dd-db78d80c187e" containerName="keystone-bootstrap" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.941099 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="93da1283-17cd-4b15-a4dd-db78d80c187e" containerName="keystone-bootstrap" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.941294 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="93da1283-17cd-4b15-a4dd-db78d80c187e" containerName="keystone-bootstrap" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.942173 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.945492 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.945676 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.945830 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rw8tv" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.945984 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.948031 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 24 08:13:25 crc kubenswrapper[4691]: I1124 08:13:25.970049 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-tx7zx"] Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.030295 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-scripts\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.030378 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-fernet-keys\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.030405 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-config-data\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.030631 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgpjc\" (UniqueName: \"kubernetes.io/projected/d1c22b4d-4593-461a-9096-f81674b136b7-kube-api-access-kgpjc\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.031117 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-credential-keys\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.031243 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-combined-ca-bundle\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.133785 4691 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-kgpjc\" (UniqueName: \"kubernetes.io/projected/d1c22b4d-4593-461a-9096-f81674b136b7-kube-api-access-kgpjc\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.133869 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-credential-keys\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.133898 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-combined-ca-bundle\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.133948 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-scripts\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.133994 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-fernet-keys\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.134013 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-config-data\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.141556 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-config-data\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.142049 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-scripts\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.142188 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-combined-ca-bundle\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.143676 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-fernet-keys\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") 
" pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.151270 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-credential-keys\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.154612 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgpjc\" (UniqueName: \"kubernetes.io/projected/d1c22b4d-4593-461a-9096-f81674b136b7-kube-api-access-kgpjc\") pod \"keystone-bootstrap-tx7zx\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") " pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.269701 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-tx7zx" Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.473354 4691 generic.go:334] "Generic (PLEG): container finished" podID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerID="2a2a2562c36e38706ab1f8efc94201056f0d5cc564e742fbabbe272807054562" exitCode=0 Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.473411 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" event={"ID":"e4184c5f-6573-4950-8920-d0b3d7aa2989","Type":"ContainerDied","Data":"2a2a2562c36e38706ab1f8efc94201056f0d5cc564e742fbabbe272807054562"} Nov 24 08:13:26 crc kubenswrapper[4691]: I1124 08:13:26.771139 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93da1283-17cd-4b15-a4dd-db78d80c187e" path="/var/lib/kubelet/pods/93da1283-17cd-4b15-a4dd-db78d80c187e/volumes" Nov 24 08:13:28 crc kubenswrapper[4691]: I1124 08:13:28.603763 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" podUID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.129:5353: connect: connection refused" Nov 24 08:13:32 crc kubenswrapper[4691]: E1124 08:13:32.679721 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 24 08:13:32 crc kubenswrapper[4691]: E1124 08:13:32.680786 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5c9h58ch94hc5h5c8h57ch55h6dhfdh64bh574h5b8h5b7h56ch5c5hf8h5d5h5b7h544h5b6h5b4h675h667h586hdbh54bhdfh54bh56bh66bh9hd7q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8ss7j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-797fd4697-lsphq_openstack(7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 08:13:32 crc kubenswrapper[4691]: E1124 08:13:32.693397 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-797fd4697-lsphq" podUID="7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4" Nov 24 08:13:32 crc kubenswrapper[4691]: E1124 08:13:32.722800 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 24 08:13:32 crc kubenswrapper[4691]: E1124 08:13:32.723115 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n577hbh545h5c8h685hddh97h579h545h5d5h5b5h676h8dh64dhc9h5cch7ch689h8hb5h98hbdh597h596h599h99h569h5dch564h66ch698h8dq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vbqvg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-7f64cf65df-8hgb4_openstack(b581c7d3-dc44-4505-b3f7-fc7aff32f5df): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 08:13:32 crc kubenswrapper[4691]: E1124 08:13:32.725623 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-7f64cf65df-8hgb4" podUID="b581c7d3-dc44-4505-b3f7-fc7aff32f5df" Nov 24 08:13:33 crc kubenswrapper[4691]: I1124 08:13:33.603371 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" podUID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.129:5353: connect: connection refused" Nov 24 08:13:34 crc kubenswrapper[4691]: E1124 08:13:34.666429 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Nov 24 08:13:34 crc kubenswrapper[4691]: E1124 08:13:34.667350 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c 
Nov 24 08:13:34 crc kubenswrapper[4691]: E1124 08:13:34.668762 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-8g8rz" podUID="faf5645f-a25c-4bde-9769-51e1681b7eba"
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.797491 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f64cf65df-8hgb4"
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.808869 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-797fd4697-lsphq"
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.946939 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-horizon-secret-key\") pod \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") "
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.947000 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-scripts\") pod \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") "
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.947118 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-horizon-secret-key\") pod \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") "
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.947208 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-scripts\") pod \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") "
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.947253 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbqvg\" (UniqueName: \"kubernetes.io/projected/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-kube-api-access-vbqvg\") pod \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") "
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.947292 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-logs\") pod \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") "
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.947357 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ss7j\" (UniqueName: \"kubernetes.io/projected/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-kube-api-access-8ss7j\") pod \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") "
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.947402 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-config-data\") pod \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\" (UID: \"b581c7d3-dc44-4505-b3f7-fc7aff32f5df\") "
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.947432 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-config-data\") pod \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") "
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.947466 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-logs\") pod \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\" (UID: \"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4\") "
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.947890 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-logs" (OuterVolumeSpecName: "logs") pod "b581c7d3-dc44-4505-b3f7-fc7aff32f5df" (UID: "b581c7d3-dc44-4505-b3f7-fc7aff32f5df"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.948269 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-scripts" (OuterVolumeSpecName: "scripts") pod "7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4" (UID: "7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.948264 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-scripts" (OuterVolumeSpecName: "scripts") pod "b581c7d3-dc44-4505-b3f7-fc7aff32f5df" (UID: "b581c7d3-dc44-4505-b3f7-fc7aff32f5df"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.948385 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-config-data" (OuterVolumeSpecName: "config-data") pod "7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4" (UID: "7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.948521 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-logs" (OuterVolumeSpecName: "logs") pod "7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4" (UID: "7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.948777 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-config-data" (OuterVolumeSpecName: "config-data") pod "b581c7d3-dc44-4505-b3f7-fc7aff32f5df" (UID: "b581c7d3-dc44-4505-b3f7-fc7aff32f5df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.953433 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-kube-api-access-8ss7j" (OuterVolumeSpecName: "kube-api-access-8ss7j") pod "7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4" (UID: "7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4"). InnerVolumeSpecName "kube-api-access-8ss7j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.953886 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "b581c7d3-dc44-4505-b3f7-fc7aff32f5df" (UID: "b581c7d3-dc44-4505-b3f7-fc7aff32f5df"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.953955 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-kube-api-access-vbqvg" (OuterVolumeSpecName: "kube-api-access-vbqvg") pod "b581c7d3-dc44-4505-b3f7-fc7aff32f5df" (UID: "b581c7d3-dc44-4505-b3f7-fc7aff32f5df"). InnerVolumeSpecName "kube-api-access-vbqvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:34 crc kubenswrapper[4691]: I1124 08:13:34.955579 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4" (UID: "7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.049378 4691 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.049418 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.049429 4691 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.049439 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.049477 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbqvg\" (UniqueName: \"kubernetes.io/projected/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-kube-api-access-vbqvg\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.049488 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.049496 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ss7j\" (UniqueName: \"kubernetes.io/projected/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-kube-api-access-8ss7j\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.049505 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b581c7d3-dc44-4505-b3f7-fc7aff32f5df-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.049513 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.049523 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:35 crc kubenswrapper[4691]: E1124 08:13:35.211650 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 24 08:13:35 crc kubenswrapper[4691]: E1124 08:13:35.211800 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pjbwv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-qvhpm_openstack(09239709-f618-437f-a720-070aff572294): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 24 08:13:35 crc kubenswrapper[4691]: E1124 08:13:35.212978 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-qvhpm" podUID="09239709-f618-437f-a720-070aff572294" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.563360 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f64cf65df-8hgb4" event={"ID":"b581c7d3-dc44-4505-b3f7-fc7aff32f5df","Type":"ContainerDied","Data":"0f47ebfc3dd12985c7619527ea620401dd7e7620f52553cbeaf92f855c6124bd"} Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.563396 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7f64cf65df-8hgb4" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.564385 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-797fd4697-lsphq" event={"ID":"7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4","Type":"ContainerDied","Data":"f6fc6c0b9fa191e8ac35a61f3c9da0c6c251b49d23fc0f8c8c062f449c6bf7a1"} Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.564471 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-797fd4697-lsphq" Nov 24 08:13:35 crc kubenswrapper[4691]: E1124 08:13:35.566162 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-8g8rz" podUID="faf5645f-a25c-4bde-9769-51e1681b7eba" Nov 24 08:13:35 crc kubenswrapper[4691]: E1124 08:13:35.566245 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-qvhpm" podUID="09239709-f618-437f-a720-070aff572294" Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.645438 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f64cf65df-8hgb4"] Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.654627 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7f64cf65df-8hgb4"] Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.686801 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-797fd4697-lsphq"] Nov 24 08:13:35 crc kubenswrapper[4691]: I1124 08:13:35.694945 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-797fd4697-lsphq"] Nov 24 08:13:36 crc kubenswrapper[4691]: I1124 08:13:36.773097 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4" path="/var/lib/kubelet/pods/7fb778a4-c5d4-409c-a04c-d4f46d2a4cc4/volumes" Nov 24 08:13:36 crc kubenswrapper[4691]: I1124 08:13:36.774042 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b581c7d3-dc44-4505-b3f7-fc7aff32f5df" path="/var/lib/kubelet/pods/b581c7d3-dc44-4505-b3f7-fc7aff32f5df/volumes" Nov 24 08:13:38 crc kubenswrapper[4691]: I1124 08:13:38.593278 4691 generic.go:334] "Generic (PLEG): container finished" podID="e4bd742d-a7a1-402b-b1fa-9dde10e15952" containerID="3acae0443029415644f3322c8e8498e0a985411835b7279f62aa62e60b852762" exitCode=0 Nov 24 08:13:38 crc kubenswrapper[4691]: I1124 08:13:38.593361 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4jnwp" event={"ID":"e4bd742d-a7a1-402b-b1fa-9dde10e15952","Type":"ContainerDied","Data":"3acae0443029415644f3322c8e8498e0a985411835b7279f62aa62e60b852762"} Nov 24 08:13:38 crc kubenswrapper[4691]: I1124 08:13:38.605234 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" podUID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.129:5353: connect: connection refused" Nov 24 08:13:38 crc kubenswrapper[4691]: I1124 08:13:38.605467 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" 
Nov 24 08:13:43 crc kubenswrapper[4691]: I1124 08:13:43.603800 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" podUID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.129:5353: connect: connection refused"
Nov 24 08:13:45 crc kubenswrapper[4691]: E1124 08:13:45.168572 4691 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified"
Nov 24 08:13:45 crc kubenswrapper[4691]: E1124 08:13:45.169018 4691 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x4j42,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-vsrzv_openstack(29ff644c-aef6-4092-9dcf-1b4562e662d4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 24 08:13:45 crc kubenswrapper[4691]: E1124 08:13:45.170357 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-vsrzv" podUID="29ff644c-aef6-4092-9dcf-1b4562e662d4"
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.592488 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4jnwp"
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.663983 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-config\") pod \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") "
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.675510 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5"
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.678745 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4jnwp" event={"ID":"e4bd742d-a7a1-402b-b1fa-9dde10e15952","Type":"ContainerDied","Data":"20e58f4db05b47b155a5154ea9a2264b693bc113d7bc334307061af03020ea08"}
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.678797 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20e58f4db05b47b155a5154ea9a2264b693bc113d7bc334307061af03020ea08"
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.678987 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4jnwp"
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.690251 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5" event={"ID":"e4184c5f-6573-4950-8920-d0b3d7aa2989","Type":"ContainerDied","Data":"56e162076a445e9f662424f614fd90e381ded222bdfafc57c0e7dbc19140d8c6"}
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.690343 4691 scope.go:117] "RemoveContainer" containerID="2a2a2562c36e38706ab1f8efc94201056f0d5cc564e742fbabbe272807054562"
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.690276 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f59b8f679-2vnf5"
Nov 24 08:13:45 crc kubenswrapper[4691]: E1124 08:13:45.706787 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-vsrzv" podUID="29ff644c-aef6-4092-9dcf-1b4562e662d4"
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.747092 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-config" (OuterVolumeSpecName: "config") pod "e4bd742d-a7a1-402b-b1fa-9dde10e15952" (UID: "e4bd742d-a7a1-402b-b1fa-9dde10e15952"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.754973 4691 scope.go:117] "RemoveContainer" containerID="b3b73dfb520a13d0f0e064d9ef416b63b9832f8558885f558f784b79ed0c69b6"
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.770113 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-swift-storage-0\") pod \"e4184c5f-6573-4950-8920-d0b3d7aa2989\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") "
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.770299 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-nb\") pod \"e4184c5f-6573-4950-8920-d0b3d7aa2989\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") "
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.770391 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cptqw\" (UniqueName: \"kubernetes.io/projected/e4bd742d-a7a1-402b-b1fa-9dde10e15952-kube-api-access-cptqw\") pod \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") "
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.770540 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bknn9\" (UniqueName: \"kubernetes.io/projected/e4184c5f-6573-4950-8920-d0b3d7aa2989-kube-api-access-bknn9\") pod \"e4184c5f-6573-4950-8920-d0b3d7aa2989\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") "
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.770577 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-config\") pod \"e4184c5f-6573-4950-8920-d0b3d7aa2989\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") "
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.770620 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-sb\") pod \"e4184c5f-6573-4950-8920-d0b3d7aa2989\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") "
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.770644 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-svc\") pod \"e4184c5f-6573-4950-8920-d0b3d7aa2989\" (UID: \"e4184c5f-6573-4950-8920-d0b3d7aa2989\") "
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.770693 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-combined-ca-bundle\") pod \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\" (UID: \"e4bd742d-a7a1-402b-b1fa-9dde10e15952\") "
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.771351 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-config\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.832870 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4184c5f-6573-4950-8920-d0b3d7aa2989-kube-api-access-bknn9" (OuterVolumeSpecName: "kube-api-access-bknn9") pod "e4184c5f-6573-4950-8920-d0b3d7aa2989" (UID: "e4184c5f-6573-4950-8920-d0b3d7aa2989"). InnerVolumeSpecName "kube-api-access-bknn9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.848614 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4bd742d-a7a1-402b-b1fa-9dde10e15952-kube-api-access-cptqw" (OuterVolumeSpecName: "kube-api-access-cptqw") pod "e4bd742d-a7a1-402b-b1fa-9dde10e15952" (UID: "e4bd742d-a7a1-402b-b1fa-9dde10e15952"). InnerVolumeSpecName "kube-api-access-cptqw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.858253 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4bd742d-a7a1-402b-b1fa-9dde10e15952" (UID: "e4bd742d-a7a1-402b-b1fa-9dde10e15952"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.875230 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cptqw\" (UniqueName: \"kubernetes.io/projected/e4bd742d-a7a1-402b-b1fa-9dde10e15952-kube-api-access-cptqw\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.875313 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bknn9\" (UniqueName: \"kubernetes.io/projected/e4184c5f-6573-4950-8920-d0b3d7aa2989-kube-api-access-bknn9\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.875350 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4bd742d-a7a1-402b-b1fa-9dde10e15952-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.886076 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e4184c5f-6573-4950-8920-d0b3d7aa2989" (UID: "e4184c5f-6573-4950-8920-d0b3d7aa2989"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.888090 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e4184c5f-6573-4950-8920-d0b3d7aa2989" (UID: "e4184c5f-6573-4950-8920-d0b3d7aa2989"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.889246 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-config" (OuterVolumeSpecName: "config") pod "e4184c5f-6573-4950-8920-d0b3d7aa2989" (UID: "e4184c5f-6573-4950-8920-d0b3d7aa2989"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.895697 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e4184c5f-6573-4950-8920-d0b3d7aa2989" (UID: "e4184c5f-6573-4950-8920-d0b3d7aa2989"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.931234 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-77477f4d7b-kclfz"]
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.944097 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e4184c5f-6573-4950-8920-d0b3d7aa2989" (UID: "e4184c5f-6573-4950-8920-d0b3d7aa2989"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.947602 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fb4677cdd-69rb6"]
Nov 24 08:13:45 crc kubenswrapper[4691]: W1124 08:13:45.956917 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1c22b4d_4593_461a_9096_f81674b136b7.slice/crio-8ea1acb5b1b4ccfc1d41c2d027dee9f2f64aef7bb56ce4940e41f2a93bbc4b59 WatchSource:0}: Error finding container 8ea1acb5b1b4ccfc1d41c2d027dee9f2f64aef7bb56ce4940e41f2a93bbc4b59: Status 404 returned error can't find the container with id 8ea1acb5b1b4ccfc1d41c2d027dee9f2f64aef7bb56ce4940e41f2a93bbc4b59
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.959384 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-tx7zx"]
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.977105 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-config\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.977148 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.977159 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.977171 4691 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.977181 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4184c5f-6573-4950-8920-d0b3d7aa2989-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:45 crc kubenswrapper[4691]: I1124 08:13:45.987911 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.057020 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-2vnf5"]
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.065391 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f59b8f679-2vnf5"]
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.720442 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"998605ae-db3f-4eb2-9345-d5d940b82461","Type":"ContainerStarted","Data":"122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.738804 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77477f4d7b-kclfz" event={"ID":"567ed4cd-aaf3-4e52-be70-2f723075d545","Type":"ContainerStarted","Data":"6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.738854 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77477f4d7b-kclfz" event={"ID":"567ed4cd-aaf3-4e52-be70-2f723075d545","Type":"ContainerStarted","Data":"3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.738868 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77477f4d7b-kclfz" event={"ID":"567ed4cd-aaf3-4e52-be70-2f723075d545","Type":"ContainerStarted","Data":"1445263e4d5ac9005c9d3dc4b4ddbea844bbf4daa7fb8a192568a510ed21da16"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.741100 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ad1b6c3-36a5-4991-b30f-092d3bf5018b","Type":"ContainerStarted","Data":"9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.743297 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-658647c585-2tj4h" event={"ID":"3a983467-b9da-4795-8cae-f645d8e316b4","Type":"ContainerStarted","Data":"47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.743795 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-658647c585-2tj4h" event={"ID":"3a983467-b9da-4795-8cae-f645d8e316b4","Type":"ContainerStarted","Data":"1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.743805 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-658647c585-2tj4h" podUID="3a983467-b9da-4795-8cae-f645d8e316b4" containerName="horizon-log" containerID="cri-o://1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e" gracePeriod=30
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.743944 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-658647c585-2tj4h" podUID="3a983467-b9da-4795-8cae-f645d8e316b4" containerName="horizon" containerID="cri-o://47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d" gracePeriod=30
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.754231 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fb4677cdd-69rb6" event={"ID":"5f7435d6-aa83-41a0-b392-b06d77f53aa2","Type":"ContainerStarted","Data":"c00ba37c93ae0df618f914ad342f0b99ff6aa9864b98ac8e84d02d2335409c63"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.754261 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fb4677cdd-69rb6" event={"ID":"5f7435d6-aa83-41a0-b392-b06d77f53aa2","Type":"ContainerStarted","Data":"988318ecc8dd96212187b1c15d91137fb5e10d9e3f069247e54c44ed92192be3"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.754271 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fb4677cdd-69rb6" event={"ID":"5f7435d6-aa83-41a0-b392-b06d77f53aa2","Type":"ContainerStarted","Data":"1a8be44cee716b8649e22cdb7cacb1f0eb6290d7470cbea55fcd7f1d5540ff1e"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.803353 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4184c5f-6573-4950-8920-d0b3d7aa2989" path="/var/lib/kubelet/pods/e4184c5f-6573-4950-8920-d0b3d7aa2989/volumes"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.804957 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f9499a7-a024-469f-a238-e4e3a60b4c9e","Type":"ContainerStarted","Data":"ca5d1cdde7d636f23b6af208b2dbb16997370ba17e105a1cb42f418809959dc6"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.824890 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-tx7zx" event={"ID":"d1c22b4d-4593-461a-9096-f81674b136b7","Type":"ContainerStarted","Data":"887c882bd1c813a99b7ee9710fdb9ffcb4a7f44ce0c7818337a53a27ae5d63ad"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.824936 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-tx7zx" event={"ID":"d1c22b4d-4593-461a-9096-f81674b136b7","Type":"ContainerStarted","Data":"8ea1acb5b1b4ccfc1d41c2d027dee9f2f64aef7bb56ce4940e41f2a93bbc4b59"}
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.834196 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-bqjww"]
Nov 24 08:13:46 crc kubenswrapper[4691]: E1124 08:13:46.834630 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4bd742d-a7a1-402b-b1fa-9dde10e15952" containerName="neutron-db-sync"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.834648 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4bd742d-a7a1-402b-b1fa-9dde10e15952" containerName="neutron-db-sync"
Nov 24 08:13:46 crc kubenswrapper[4691]: E1124 08:13:46.834665 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerName="init"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.834671 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerName="init"
Nov 24 08:13:46 crc kubenswrapper[4691]: E1124 08:13:46.834681 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerName="dnsmasq-dns"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.834687 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerName="dnsmasq-dns"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.834856 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4184c5f-6573-4950-8920-d0b3d7aa2989" containerName="dnsmasq-dns"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.835675 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4bd742d-a7a1-402b-b1fa-9dde10e15952" containerName="neutron-db-sync"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.836639 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.889098 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-77477f4d7b-kclfz" podStartSLOduration=23.889069858 podStartE2EDuration="23.889069858s" podCreationTimestamp="2025-11-24 08:13:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:46.776406543 +0000 UTC m=+988.775355822" watchObservedRunningTime="2025-11-24 08:13:46.889069858 +0000 UTC m=+988.888019107"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.910968 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-bqjww"]
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.915655 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-658647c585-2tj4h" podStartSLOduration=2.932634432 podStartE2EDuration="28.915632368s" podCreationTimestamp="2025-11-24 08:13:18 +0000 UTC" firstStartedPulling="2025-11-24 08:13:19.227941678 +0000 UTC m=+961.226890917" lastFinishedPulling="2025-11-24 08:13:45.210939604 +0000 UTC m=+987.209888853" observedRunningTime="2025-11-24 08:13:46.819015758 +0000 UTC m=+988.817965007" watchObservedRunningTime="2025-11-24 08:13:46.915632368 +0000 UTC m=+988.914581617"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.924663 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5fb4677cdd-69rb6" podStartSLOduration=23.924646319 podStartE2EDuration="23.924646319s" podCreationTimestamp="2025-11-24 08:13:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:46.875374011 +0000 UTC m=+988.874323260" watchObservedRunningTime="2025-11-24 08:13:46.924646319 +0000 UTC m=+988.923595568"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.940561 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7fc5776b84-69d6x"]
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.942510 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.949493 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.949678 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.949789 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.953100 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-k562q"
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.955862 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7fc5776b84-69d6x"]
Nov 24 08:13:46 crc kubenswrapper[4691]: I1124 08:13:46.972309 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-tx7zx" podStartSLOduration=21.97228551 podStartE2EDuration="21.97228551s" podCreationTimestamp="2025-11-24 08:13:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:46.927760539 +0000 UTC m=+988.926709798" watchObservedRunningTime="2025-11-24 08:13:46.97228551 +0000 UTC m=+988.971234759"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.006645 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-svc\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.006720 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.006788 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-config\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.006845 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.006867 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbds4\" (UniqueName: \"kubernetes.io/projected/060626b1-822b-4c3a-a1b0-a1c14fb04c18-kube-api-access-vbds4\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.006915 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.109847 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-config\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.109931 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-httpd-config\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.109994 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.110022 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbds4\" (UniqueName: \"kubernetes.io/projected/060626b1-822b-4c3a-a1b0-a1c14fb04c18-kube-api-access-vbds4\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.110061 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-config\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.110094 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-combined-ca-bundle\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.110123 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-ovndb-tls-certs\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.110146 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.110183 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrrbs\" (UniqueName: \"kubernetes.io/projected/019368b7-2336-4105-a06f-a05ce4cdcc60-kube-api-access-nrrbs\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.110243 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-svc\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.111851 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.112668 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-config\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.113018 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.114789 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.117154 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-svc\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.117316 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.150421 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbds4\" (UniqueName: \"kubernetes.io/projected/060626b1-822b-4c3a-a1b0-a1c14fb04c18-kube-api-access-vbds4\") pod \"dnsmasq-dns-6b7b667979-bqjww\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.207060 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.214080 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-httpd-config\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.214159 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-config\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.214188 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-combined-ca-bundle\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.214207 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-ovndb-tls-certs\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.214237 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrrbs\" (UniqueName: \"kubernetes.io/projected/019368b7-2336-4105-a06f-a05ce4cdcc60-kube-api-access-nrrbs\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.219902 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-httpd-config\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.223427 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-config\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.223487 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-ovndb-tls-certs\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.224105 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-combined-ca-bundle\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.242328 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrrbs\" (UniqueName: \"kubernetes.io/projected/019368b7-2336-4105-a06f-a05ce4cdcc60-kube-api-access-nrrbs\") pod \"neutron-7fc5776b84-69d6x\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.296935 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7fc5776b84-69d6x"
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.836154 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f9499a7-a024-469f-a238-e4e3a60b4c9e","Type":"ContainerStarted","Data":"543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a"}
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.836602 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f9499a7-a024-469f-a238-e4e3a60b4c9e","Type":"ContainerStarted","Data":"28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229"}
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.840116 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"998605ae-db3f-4eb2-9345-d5d940b82461","Type":"ContainerStarted","Data":"80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2"}
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.840689 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="998605ae-db3f-4eb2-9345-d5d940b82461" containerName="glance-log" containerID="cri-o://122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397" gracePeriod=30
Nov 24 08:13:47 crc kubenswrapper[4691]: I1124 08:13:47.840878 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="998605ae-db3f-4eb2-9345-d5d940b82461" containerName="glance-httpd" containerID="cri-o://80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2" gracePeriod=30
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.442871 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-658647c585-2tj4h"
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.785115 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=28.785094036 podStartE2EDuration="28.785094036s" podCreationTimestamp="2025-11-24 08:13:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:47.865322721 +0000 UTC m=+989.864271990" watchObservedRunningTime="2025-11-24 08:13:48.785094036 +0000 UTC m=+990.784043285"
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.822083 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.835291 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-bqjww"]
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.911227 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-qvhpm" event={"ID":"09239709-f618-437f-a720-070aff572294","Type":"ContainerStarted","Data":"ece511d3f226f171956b90efd3e6ccc5790855743bad985a4cf5a2e3be617515"}
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.920961 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-bqjww" event={"ID":"060626b1-822b-4c3a-a1b0-a1c14fb04c18","Type":"ContainerStarted","Data":"1fa3f21881b2f293c8796d1671904b83c896f93a25149f2080ac71f7808563f2"}
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.931726 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-8g8rz" event={"ID":"faf5645f-a25c-4bde-9769-51e1681b7eba","Type":"ContainerStarted","Data":"50c8d054ee05ed39c26785d05b501867d1dcf4ff7d3640dbd3b26d4295f2a892"}
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.936271 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-qvhpm" podStartSLOduration=2.966499686 podStartE2EDuration="34.936248757s" podCreationTimestamp="2025-11-24 08:13:14 +0000 UTC" firstStartedPulling="2025-11-24 08:13:16.533020404 +0000 UTC m=+958.531969653" lastFinishedPulling="2025-11-24 08:13:48.502769475 +0000 UTC m=+990.501718724" observedRunningTime="2025-11-24 08:13:48.934194147 +0000 UTC m=+990.933143396" watchObservedRunningTime="2025-11-24 08:13:48.936248757 +0000 UTC m=+990.935198016"
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.956780 4691 generic.go:334] "Generic (PLEG): container finished" podID="998605ae-db3f-4eb2-9345-d5d940b82461" containerID="80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2" exitCode=0
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.956827 4691 generic.go:334] "Generic (PLEG): container finished" podID="998605ae-db3f-4eb2-9345-d5d940b82461" containerID="122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397" exitCode=143
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.957013 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9f9499a7-a024-469f-a238-e4e3a60b4c9e" containerName="glance-log" containerID="cri-o://543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a" gracePeriod=30
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.957335 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9f9499a7-a024-469f-a238-e4e3a60b4c9e" containerName="glance-httpd" containerID="cri-o://28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229" gracePeriod=30
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.957406 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.957425 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"998605ae-db3f-4eb2-9345-d5d940b82461","Type":"ContainerDied","Data":"80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2"}
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.957480 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"998605ae-db3f-4eb2-9345-d5d940b82461","Type":"ContainerDied","Data":"122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397"}
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.957502 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"998605ae-db3f-4eb2-9345-d5d940b82461","Type":"ContainerDied","Data":"37b4404b8d6dd52702c87d8b7026e9a6a9502ec9970a92e8a729c5adfb8f2766"}
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.957526 4691 scope.go:117] "RemoveContainer" containerID="80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2"
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.976704 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkvck\" (UniqueName: \"kubernetes.io/projected/998605ae-db3f-4eb2-9345-d5d940b82461-kube-api-access-qkvck\") pod \"998605ae-db3f-4eb2-9345-d5d940b82461\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") "
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.976773 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-scripts\") pod \"998605ae-db3f-4eb2-9345-d5d940b82461\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") "
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.976819 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-public-tls-certs\") pod \"998605ae-db3f-4eb2-9345-d5d940b82461\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") "
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.976928 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-httpd-run\") pod \"998605ae-db3f-4eb2-9345-d5d940b82461\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") "
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.976955 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-logs\") pod \"998605ae-db3f-4eb2-9345-d5d940b82461\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") "
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.976975 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"998605ae-db3f-4eb2-9345-d5d940b82461\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") "
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.977051 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-config-data\") pod \"998605ae-db3f-4eb2-9345-d5d940b82461\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") "
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.977084 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-combined-ca-bundle\") pod \"998605ae-db3f-4eb2-9345-d5d940b82461\" (UID: \"998605ae-db3f-4eb2-9345-d5d940b82461\") "
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.978026 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-8g8rz" podStartSLOduration=3.502139339 podStartE2EDuration="34.978002167s" podCreationTimestamp="2025-11-24 08:13:14 +0000 UTC" firstStartedPulling="2025-11-24 08:13:16.782866035 +0000 UTC m=+958.781815284" lastFinishedPulling="2025-11-24 08:13:48.258728863 +0000 UTC m=+990.257678112" observedRunningTime="2025-11-24 08:13:48.961977443 +0000 UTC m=+990.960926692" watchObservedRunningTime="2025-11-24 08:13:48.978002167 +0000 UTC m=+990.976951426"
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.980790 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "998605ae-db3f-4eb2-9345-d5d940b82461" (UID: "998605ae-db3f-4eb2-9345-d5d940b82461"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.984743 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-logs" (OuterVolumeSpecName: "logs") pod "998605ae-db3f-4eb2-9345-d5d940b82461" (UID: "998605ae-db3f-4eb2-9345-d5d940b82461"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:13:48 crc kubenswrapper[4691]: I1124 08:13:48.990424 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/998605ae-db3f-4eb2-9345-d5d940b82461-kube-api-access-qkvck" (OuterVolumeSpecName: "kube-api-access-qkvck") pod "998605ae-db3f-4eb2-9345-d5d940b82461" (UID: "998605ae-db3f-4eb2-9345-d5d940b82461"). InnerVolumeSpecName "kube-api-access-qkvck". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.009920 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "998605ae-db3f-4eb2-9345-d5d940b82461" (UID: "998605ae-db3f-4eb2-9345-d5d940b82461"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.015377 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-scripts" (OuterVolumeSpecName: "scripts") pod "998605ae-db3f-4eb2-9345-d5d940b82461" (UID: "998605ae-db3f-4eb2-9345-d5d940b82461"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.016242 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=28.016219055 podStartE2EDuration="28.016219055s" podCreationTimestamp="2025-11-24 08:13:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:48.996451632 +0000 UTC m=+990.995400881" watchObservedRunningTime="2025-11-24 08:13:49.016219055 +0000 UTC m=+991.015168304"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.048227 4691 scope.go:117] "RemoveContainer" containerID="122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.052208 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7fc5776b84-69d6x"]
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.064620 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "998605ae-db3f-4eb2-9345-d5d940b82461" (UID: "998605ae-db3f-4eb2-9345-d5d940b82461"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.079716 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.079751 4691 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.079762 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/998605ae-db3f-4eb2-9345-d5d940b82461-logs\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.079781 4691 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" "
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.079791 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.079802 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkvck\" (UniqueName: \"kubernetes.io/projected/998605ae-db3f-4eb2-9345-d5d940b82461-kube-api-access-qkvck\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.101380 4691 scope.go:117] "RemoveContainer" containerID="80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2"
Nov 24 08:13:49 crc kubenswrapper[4691]: E1124 08:13:49.103637 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2\": container with ID starting with 80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2 not found: ID does not exist" containerID="80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.103689 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2"} err="failed to get container status \"80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2\": rpc error: code = NotFound desc = could not find container \"80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2\": container with ID starting with 80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2 not found: ID does not exist"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.103715 4691 scope.go:117] "RemoveContainer" containerID="122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397"
Nov 24 08:13:49 crc kubenswrapper[4691]: E1124 08:13:49.108672 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397\": container with ID starting with 122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397 not found: ID does not exist" containerID="122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.108719 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397"} err="failed to get container status \"122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397\": rpc error: code = NotFound desc = could not find container \"122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397\": container with ID starting with 122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397 not found: ID does not exist"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.108751 4691 scope.go:117] "RemoveContainer" containerID="80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.109130 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2"} err="failed to get container status \"80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2\": rpc error: code = NotFound desc = could not find container \"80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2\": container with ID starting with 80d5c83e335bf67fd47a707f654b574f60c0903620939838d0fd23add3dddde2 not found: ID does not exist"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.109172 4691 scope.go:117] "RemoveContainer" containerID="122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.109487 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397"} err="failed to get container status \"122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397\": rpc error: code = NotFound desc = could not find container \"122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397\": container with ID starting with 122cde65ca7574e0db746f6b72d2ed61b913de999397eceff5dc10dbc2780397 not found: ID does not exist"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.117234 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-config-data" (OuterVolumeSpecName: "config-data") pod "998605ae-db3f-4eb2-9345-d5d940b82461" (UID: "998605ae-db3f-4eb2-9345-d5d940b82461"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.152139 4691 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.159679 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "998605ae-db3f-4eb2-9345-d5d940b82461" (UID: "998605ae-db3f-4eb2-9345-d5d940b82461"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.181160 4691 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.181192 4691 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.181205 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/998605ae-db3f-4eb2-9345-d5d940b82461-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.455610 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.485224 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.528244 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 24 08:13:49 crc kubenswrapper[4691]: E1124 08:13:49.529216 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="998605ae-db3f-4eb2-9345-d5d940b82461" containerName="glance-httpd"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.529232 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="998605ae-db3f-4eb2-9345-d5d940b82461" containerName="glance-httpd"
Nov 24 08:13:49 crc kubenswrapper[4691]: E1124 08:13:49.529242 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="998605ae-db3f-4eb2-9345-d5d940b82461" containerName="glance-log"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.529247 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="998605ae-db3f-4eb2-9345-d5d940b82461" containerName="glance-log"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.529638 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="998605ae-db3f-4eb2-9345-d5d940b82461" containerName="glance-log"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.529660 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="998605ae-db3f-4eb2-9345-d5d940b82461" containerName="glance-httpd"
Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.534952 4691 util.go:30] "No sandbox for pod can be
found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.536832 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.537989 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.541779 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.714511 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-57b84ccfdc-qnsn7"] Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.716045 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.731169 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.731459 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.732921 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-scripts\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.732967 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-config-data\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.733015 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.733144 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.733314 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.733566 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-logs\") pod 
\"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.733593 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.733627 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnv92\" (UniqueName: \"kubernetes.io/projected/08ef6b82-8aff-4b11-a0be-9e04670b96b7-kube-api-access-dnv92\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.734933 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-57b84ccfdc-qnsn7"] Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.784507 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.834790 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8j85\" (UniqueName: \"kubernetes.io/projected/8912bda5-405a-472b-a80f-2140a7bb0ded-kube-api-access-h8j85\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.834844 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.834877 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnv92\" (UniqueName: \"kubernetes.io/projected/08ef6b82-8aff-4b11-a0be-9e04670b96b7-kube-api-access-dnv92\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.834939 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-scripts\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.834957 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-combined-ca-bundle\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.834974 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-config-data\") 
pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.835004 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.835033 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.835100 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-internal-tls-certs\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.835123 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-config\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.835150 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.835180 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-httpd-config\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.835199 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-ovndb-tls-certs\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.835225 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-public-tls-certs\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.835243 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-logs\") pod \"glance-default-external-api-0\" (UID: 
\"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.835723 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-logs\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.836051 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.838930 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.863808 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.864686 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-config-data\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.865236 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-scripts\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.866751 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.880432 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnv92\" (UniqueName: \"kubernetes.io/projected/08ef6b82-8aff-4b11-a0be-9e04670b96b7-kube-api-access-dnv92\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.889065 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 
08:13:49.947617 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.947686 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-logs\") pod \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.947777 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-internal-tls-certs\") pod \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.947836 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-httpd-run\") pod \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.947851 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-combined-ca-bundle\") pod \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.947872 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnppg\" (UniqueName: \"kubernetes.io/projected/9f9499a7-a024-469f-a238-e4e3a60b4c9e-kube-api-access-rnppg\") pod \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.947900 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-scripts\") pod \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.947973 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-config-data\") pod \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\" (UID: \"9f9499a7-a024-469f-a238-e4e3a60b4c9e\") " Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.948204 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-internal-tls-certs\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.948233 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-config\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.948297 4691 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-ovndb-tls-certs\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.948323 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-httpd-config\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.948359 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-public-tls-certs\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.948393 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8j85\" (UniqueName: \"kubernetes.io/projected/8912bda5-405a-472b-a80f-2140a7bb0ded-kube-api-access-h8j85\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.948510 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-combined-ca-bundle\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.954092 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.954316 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9f9499a7-a024-469f-a238-e4e3a60b4c9e" (UID: "9f9499a7-a024-469f-a238-e4e3a60b4c9e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.961613 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-logs" (OuterVolumeSpecName: "logs") pod "9f9499a7-a024-469f-a238-e4e3a60b4c9e" (UID: "9f9499a7-a024-469f-a238-e4e3a60b4c9e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.963121 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-ovndb-tls-certs\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.970978 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-config\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.975379 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-public-tls-certs\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.985932 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f9499a7-a024-469f-a238-e4e3a60b4c9e-kube-api-access-rnppg" (OuterVolumeSpecName: "kube-api-access-rnppg") pod "9f9499a7-a024-469f-a238-e4e3a60b4c9e" (UID: "9f9499a7-a024-469f-a238-e4e3a60b4c9e"). InnerVolumeSpecName "kube-api-access-rnppg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:13:49 crc kubenswrapper[4691]: I1124 08:13:49.986925 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-httpd-config\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.002770 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-scripts" (OuterVolumeSpecName: "scripts") pod "9f9499a7-a024-469f-a238-e4e3a60b4c9e" (UID: "9f9499a7-a024-469f-a238-e4e3a60b4c9e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.004339 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-internal-tls-certs\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.005020 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8912bda5-405a-472b-a80f-2140a7bb0ded-combined-ca-bundle\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.031776 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8j85\" (UniqueName: \"kubernetes.io/projected/8912bda5-405a-472b-a80f-2140a7bb0ded-kube-api-access-h8j85\") pod \"neutron-57b84ccfdc-qnsn7\" (UID: \"8912bda5-405a-472b-a80f-2140a7bb0ded\") " pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.060180 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "9f9499a7-a024-469f-a238-e4e3a60b4c9e" (UID: "9f9499a7-a024-469f-a238-e4e3a60b4c9e"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.061718 4691 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.061741 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.061751 4691 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f9499a7-a024-469f-a238-e4e3a60b4c9e-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.061761 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnppg\" (UniqueName: \"kubernetes.io/projected/9f9499a7-a024-469f-a238-e4e3a60b4c9e-kube-api-access-rnppg\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.061772 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.062179 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.066228 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7fc5776b84-69d6x" event={"ID":"019368b7-2336-4105-a06f-a05ce4cdcc60","Type":"ContainerStarted","Data":"3f2d31065c69e2013c05bd582aeae10523506355ca75683b23575687e055048a"} Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.066281 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7fc5776b84-69d6x" event={"ID":"019368b7-2336-4105-a06f-a05ce4cdcc60","Type":"ContainerStarted","Data":"b505c80d0a3e4a651ff0f71ed1fb9e016b5728a74c606f3afe30757f029deec3"} Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.066302 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7fc5776b84-69d6x" event={"ID":"019368b7-2336-4105-a06f-a05ce4cdcc60","Type":"ContainerStarted","Data":"57d1521d7f954c2a5ff240c7513699d5818fd0c459b6f598d6247f11fadf907b"} Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.067881 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7fc5776b84-69d6x" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.069822 4691 generic.go:334] "Generic (PLEG): container finished" podID="9f9499a7-a024-469f-a238-e4e3a60b4c9e" containerID="28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229" exitCode=0 Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.069839 4691 generic.go:334] "Generic (PLEG): container finished" podID="9f9499a7-a024-469f-a238-e4e3a60b4c9e" containerID="543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a" exitCode=143 Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.069874 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f9499a7-a024-469f-a238-e4e3a60b4c9e","Type":"ContainerDied","Data":"28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229"} Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.069893 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f9499a7-a024-469f-a238-e4e3a60b4c9e","Type":"ContainerDied","Data":"543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a"} Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.069908 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f9499a7-a024-469f-a238-e4e3a60b4c9e","Type":"ContainerDied","Data":"ca5d1cdde7d636f23b6af208b2dbb16997370ba17e105a1cb42f418809959dc6"} Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.069925 4691 scope.go:117] "RemoveContainer" containerID="28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.070529 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.090589 4691 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.105046 4691 generic.go:334] "Generic (PLEG): container finished" podID="060626b1-822b-4c3a-a1b0-a1c14fb04c18" containerID="13553884387f303980046b6fe32b69600377b3f3706c43be7e7564530165cb20" exitCode=0 Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.105190 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-bqjww" event={"ID":"060626b1-822b-4c3a-a1b0-a1c14fb04c18","Type":"ContainerDied","Data":"13553884387f303980046b6fe32b69600377b3f3706c43be7e7564530165cb20"} Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.106417 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7fc5776b84-69d6x" podStartSLOduration=4.106393379 podStartE2EDuration="4.106393379s" podCreationTimestamp="2025-11-24 08:13:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:50.106131912 +0000 UTC m=+992.105081161" watchObservedRunningTime="2025-11-24 08:13:50.106393379 +0000 UTC m=+992.105342628" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.118704 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9f9499a7-a024-469f-a238-e4e3a60b4c9e" (UID: "9f9499a7-a024-469f-a238-e4e3a60b4c9e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.127383 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ad1b6c3-36a5-4991-b30f-092d3bf5018b","Type":"ContainerStarted","Data":"6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03"} Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.159363 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-config-data" (OuterVolumeSpecName: "config-data") pod "9f9499a7-a024-469f-a238-e4e3a60b4c9e" (UID: "9f9499a7-a024-469f-a238-e4e3a60b4c9e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.163655 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.163683 4691 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.163692 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.179651 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "9f9499a7-a024-469f-a238-e4e3a60b4c9e" (UID: "9f9499a7-a024-469f-a238-e4e3a60b4c9e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.189338 4691 scope.go:117] "RemoveContainer" containerID="543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.265604 4691 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9499a7-a024-469f-a238-e4e3a60b4c9e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.447283 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.461711 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.474977 4691 scope.go:117] "RemoveContainer" containerID="28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229" Nov 24 08:13:50 crc kubenswrapper[4691]: E1124 08:13:50.482250 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229\": container with ID starting with 28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229 not found: ID does not exist" containerID="28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.482297 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229"} err="failed to get container status \"28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229\": rpc error: code = NotFound desc = could not find container \"28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229\": container with ID starting with 28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229 not found: ID does not exist" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.482329 4691 scope.go:117] "RemoveContainer" containerID="543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a" Nov 24 08:13:50 crc kubenswrapper[4691]: E1124 08:13:50.482841 4691 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a\": container with ID starting with 543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a not found: ID does not exist" containerID="543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.482872 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a"} err="failed to get container status \"543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a\": rpc error: code = NotFound desc = could not find container \"543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a\": container with ID starting with 543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a not found: ID does not exist" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.482892 4691 scope.go:117] "RemoveContainer" containerID="28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.483367 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229"} err="failed to get container status \"28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229\": rpc error: code = NotFound desc = could not find container \"28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229\": container with ID starting with 28a36fbda116508d498b25f7b17ef6750e8fe0a9de58a350b0e49849fda26229 not found: ID does not exist" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.483396 4691 scope.go:117] "RemoveContainer" containerID="543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.484186 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a"} err="failed to get container status \"543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a\": rpc error: code = NotFound desc = could not find container \"543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a\": container with ID starting with 543844ececd8030c8113832fed659222da6eaf18292f29882c20cc745fc0ad0a not found: ID does not exist" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.497524 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:13:50 crc kubenswrapper[4691]: E1124 08:13:50.498086 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f9499a7-a024-469f-a238-e4e3a60b4c9e" containerName="glance-log" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.498112 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f9499a7-a024-469f-a238-e4e3a60b4c9e" containerName="glance-log" Nov 24 08:13:50 crc kubenswrapper[4691]: E1124 08:13:50.498134 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f9499a7-a024-469f-a238-e4e3a60b4c9e" containerName="glance-httpd" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.498143 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f9499a7-a024-469f-a238-e4e3a60b4c9e" containerName="glance-httpd" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.498352 4691 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="9f9499a7-a024-469f-a238-e4e3a60b4c9e" containerName="glance-httpd" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.498377 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f9499a7-a024-469f-a238-e4e3a60b4c9e" containerName="glance-log" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.499582 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.509751 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.510010 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.513327 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.706228 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.707884 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-logs\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.707949 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmglg\" (UniqueName: \"kubernetes.io/projected/8626dac3-0df1-42b8-8ea2-52239b7b73c3-kube-api-access-nmglg\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.708092 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.708208 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.708340 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.708408 4691 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.708552 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.777042 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="998605ae-db3f-4eb2-9345-d5d940b82461" path="/var/lib/kubelet/pods/998605ae-db3f-4eb2-9345-d5d940b82461/volumes" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.778194 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f9499a7-a024-469f-a238-e4e3a60b4c9e" path="/var/lib/kubelet/pods/9f9499a7-a024-469f-a238-e4e3a60b4c9e/volumes" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.809972 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.810051 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.810112 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.810150 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.810174 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-logs\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.810208 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmglg\" (UniqueName: \"kubernetes.io/projected/8626dac3-0df1-42b8-8ea2-52239b7b73c3-kube-api-access-nmglg\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.810291 4691 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.810346 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.811115 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.814424 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-logs\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.815026 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.821417 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.821481 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.823839 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.828201 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.836992 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmglg\" (UniqueName: \"kubernetes.io/projected/8626dac3-0df1-42b8-8ea2-52239b7b73c3-kube-api-access-nmglg\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:50 crc kubenswrapper[4691]: I1124 08:13:50.909188 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.062192 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-57b84ccfdc-qnsn7"]
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.090740 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.090836 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.090879 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc"
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.091586 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8d580292dc3a8a86e61ece515d1a697fe0192e1bffaa2352b8d538c10b88fced"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.091631 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://8d580292dc3a8a86e61ece515d1a697fe0192e1bffaa2352b8d538c10b88fced" gracePeriod=600
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.173736 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-bqjww" event={"ID":"060626b1-822b-4c3a-a1b0-a1c14fb04c18","Type":"ContainerStarted","Data":"0c2904c396c77b022c04b24128f89ae250630983439281e5cef40acfb847709b"}
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.177563 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.188954 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57b84ccfdc-qnsn7" event={"ID":"8912bda5-405a-472b-a80f-2140a7bb0ded","Type":"ContainerStarted","Data":"1bca155901bb7e8d9caf0de059251e5e5cc5327035a39aec47380492665a4091"}
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.212306 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7b667979-bqjww" podStartSLOduration=5.211397143 podStartE2EDuration="5.211397143s" podCreationTimestamp="2025-11-24 08:13:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:51.204844284 +0000 UTC m=+993.203793533" watchObservedRunningTime="2025-11-24 08:13:51.211397143 +0000 UTC m=+993.210346392"
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.212718 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.750884 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 24 08:13:51 crc kubenswrapper[4691]: W1124 08:13:51.756413 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod08ef6b82_8aff_4b11_a0be_9e04670b96b7.slice/crio-71a233985b2bf39462857dfe54a9ceaaa36bdfe3a035db9fd44667362b53b3c0 WatchSource:0}: Error finding container 71a233985b2bf39462857dfe54a9ceaaa36bdfe3a035db9fd44667362b53b3c0: Status 404 returned error can't find the container with id 71a233985b2bf39462857dfe54a9ceaaa36bdfe3a035db9fd44667362b53b3c0
Nov 24 08:13:51 crc kubenswrapper[4691]: I1124 08:13:51.946763 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 24 08:13:51 crc kubenswrapper[4691]: W1124 08:13:51.971734 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8626dac3_0df1_42b8_8ea2_52239b7b73c3.slice/crio-9d8c93534e2f44793a40fac94db6a83609eae2776d35c5e359fa90d5651de361 WatchSource:0}: Error finding container 9d8c93534e2f44793a40fac94db6a83609eae2776d35c5e359fa90d5651de361: Status 404 returned error can't find the container with id 9d8c93534e2f44793a40fac94db6a83609eae2776d35c5e359fa90d5651de361
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.227566 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8626dac3-0df1-42b8-8ea2-52239b7b73c3","Type":"ContainerStarted","Data":"9d8c93534e2f44793a40fac94db6a83609eae2776d35c5e359fa90d5651de361"}
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.233525 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57b84ccfdc-qnsn7" event={"ID":"8912bda5-405a-472b-a80f-2140a7bb0ded","Type":"ContainerStarted","Data":"b77d820180f7c0fa89b114f5091a594ca0d762651a182e774e8e43f16ff14cee"}
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.233638 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-57b84ccfdc-qnsn7" event={"ID":"8912bda5-405a-472b-a80f-2140a7bb0ded","Type":"ContainerStarted","Data":"c08b59c09e09cdbc890a18d0369c44ce399660113661860907da51c8e8c89ba5"}
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.233749 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-57b84ccfdc-qnsn7"
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.251744 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"08ef6b82-8aff-4b11-a0be-9e04670b96b7","Type":"ContainerStarted","Data":"71a233985b2bf39462857dfe54a9ceaaa36bdfe3a035db9fd44667362b53b3c0"}
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.262158 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-57b84ccfdc-qnsn7" podStartSLOduration=3.262140544 podStartE2EDuration="3.262140544s" podCreationTimestamp="2025-11-24 08:13:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:52.259033814 +0000 UTC m=+994.257983063" watchObservedRunningTime="2025-11-24 08:13:52.262140544 +0000 UTC m=+994.261089783"
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.293916 4691 generic.go:334] "Generic (PLEG): container finished" podID="d1c22b4d-4593-461a-9096-f81674b136b7" containerID="887c882bd1c813a99b7ee9710fdb9ffcb4a7f44ce0c7818337a53a27ae5d63ad" exitCode=0
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.294033 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-tx7zx" event={"ID":"d1c22b4d-4593-461a-9096-f81674b136b7","Type":"ContainerDied","Data":"887c882bd1c813a99b7ee9710fdb9ffcb4a7f44ce0c7818337a53a27ae5d63ad"}
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.303213 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="8d580292dc3a8a86e61ece515d1a697fe0192e1bffaa2352b8d538c10b88fced" exitCode=0
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.304156 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"8d580292dc3a8a86e61ece515d1a697fe0192e1bffaa2352b8d538c10b88fced"}
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.304183 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"914d4816a735e64c132874c27ca7b7bbe33f77f07f7911089de6ac4d29c8f36b"}
Nov 24 08:13:52 crc kubenswrapper[4691]: I1124 08:13:52.304201 4691 scope.go:117] "RemoveContainer" containerID="4adbbde14ca91fb132e770900c2c7d789c1a43897b472649dcf3666cd980576b"
Nov 24 08:13:53 crc kubenswrapper[4691]: I1124 08:13:53.338465 4691 generic.go:334] "Generic (PLEG): container finished" podID="09239709-f618-437f-a720-070aff572294" containerID="ece511d3f226f171956b90efd3e6ccc5790855743bad985a4cf5a2e3be617515" exitCode=0
Nov 24 08:13:53 crc kubenswrapper[4691]: I1124 08:13:53.338529 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-qvhpm" event={"ID":"09239709-f618-437f-a720-070aff572294","Type":"ContainerDied","Data":"ece511d3f226f171956b90efd3e6ccc5790855743bad985a4cf5a2e3be617515"}
Nov 24 08:13:53 crc kubenswrapper[4691]: I1124 08:13:53.352394 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8626dac3-0df1-42b8-8ea2-52239b7b73c3","Type":"ContainerStarted","Data":"4c790e2944796a4992eff9a8a3b45102a0210263bf2eb0726d29adc7c9799463"}
Nov 24 08:13:53 crc kubenswrapper[4691]: I1124 08:13:53.372400 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"08ef6b82-8aff-4b11-a0be-9e04670b96b7","Type":"ContainerStarted","Data":"f70b2f9247b81635da2926d0c6cc2fa525d9763c1173574682335fe1a03af738"}
Nov 24 08:13:53 crc kubenswrapper[4691]: I1124 08:13:53.377956 4691 generic.go:334] "Generic (PLEG): container finished" podID="faf5645f-a25c-4bde-9769-51e1681b7eba" containerID="50c8d054ee05ed39c26785d05b501867d1dcf4ff7d3640dbd3b26d4295f2a892" exitCode=0
Nov 24 08:13:53 crc kubenswrapper[4691]: I1124 08:13:53.378016 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-8g8rz" event={"ID":"faf5645f-a25c-4bde-9769-51e1681b7eba","Type":"ContainerDied","Data":"50c8d054ee05ed39c26785d05b501867d1dcf4ff7d3640dbd3b26d4295f2a892"}
Nov 24 08:13:54 crc kubenswrapper[4691]: I1124 08:13:54.097759 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-77477f4d7b-kclfz"
Nov 24 08:13:54 crc kubenswrapper[4691]: I1124 08:13:54.098227 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-77477f4d7b-kclfz"
Nov 24 08:13:54 crc kubenswrapper[4691]: I1124 08:13:54.213325 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5fb4677cdd-69rb6"
Nov 24 08:13:54 crc kubenswrapper[4691]: I1124 08:13:54.214421 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5fb4677cdd-69rb6"
Nov 24 08:13:54 crc kubenswrapper[4691]: I1124 08:13:54.408924 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"08ef6b82-8aff-4b11-a0be-9e04670b96b7","Type":"ContainerStarted","Data":"2fcedd905e14a1d955cf31b9414c55e5729165fb33ec43be8b06caf8c3d5383c"}
Nov 24 08:13:54 crc kubenswrapper[4691]: I1124 08:13:54.413055 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8626dac3-0df1-42b8-8ea2-52239b7b73c3","Type":"ContainerStarted","Data":"2dd5802e29187d4f6fe90a3efea1e88df7d7cbfb2877e458550a436462c05df1"}
Nov 24 08:13:54 crc kubenswrapper[4691]: I1124 08:13:54.450872 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.450851746 podStartE2EDuration="5.450851746s" podCreationTimestamp="2025-11-24 08:13:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:54.437956212 +0000 UTC m=+996.436905461" watchObservedRunningTime="2025-11-24 08:13:54.450851746 +0000 UTC m=+996.449800995"
Nov 24 08:13:54 crc kubenswrapper[4691]: I1124 08:13:54.477522 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.477498178 podStartE2EDuration="4.477498178s" podCreationTimestamp="2025-11-24 08:13:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:13:54.469749064 +0000 UTC m=+996.468698313" watchObservedRunningTime="2025-11-24 08:13:54.477498178 +0000 UTC m=+996.476447427"
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.120566 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-tx7zx"
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.291541 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgpjc\" (UniqueName: \"kubernetes.io/projected/d1c22b4d-4593-461a-9096-f81674b136b7-kube-api-access-kgpjc\") pod \"d1c22b4d-4593-461a-9096-f81674b136b7\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") "
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.291688 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-credential-keys\") pod \"d1c22b4d-4593-461a-9096-f81674b136b7\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") "
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.291720 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-fernet-keys\") pod \"d1c22b4d-4593-461a-9096-f81674b136b7\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") "
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.291861 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-config-data\") pod \"d1c22b4d-4593-461a-9096-f81674b136b7\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") "
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.291908 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-scripts\") pod \"d1c22b4d-4593-461a-9096-f81674b136b7\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") "
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.291999 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-combined-ca-bundle\") pod \"d1c22b4d-4593-461a-9096-f81674b136b7\" (UID: \"d1c22b4d-4593-461a-9096-f81674b136b7\") "
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.298853 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "d1c22b4d-4593-461a-9096-f81674b136b7" (UID: "d1c22b4d-4593-461a-9096-f81674b136b7"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.300269 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-scripts" (OuterVolumeSpecName: "scripts") pod "d1c22b4d-4593-461a-9096-f81674b136b7" (UID: "d1c22b4d-4593-461a-9096-f81674b136b7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.301521 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1c22b4d-4593-461a-9096-f81674b136b7-kube-api-access-kgpjc" (OuterVolumeSpecName: "kube-api-access-kgpjc") pod "d1c22b4d-4593-461a-9096-f81674b136b7" (UID: "d1c22b4d-4593-461a-9096-f81674b136b7"). InnerVolumeSpecName "kube-api-access-kgpjc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.305535 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d1c22b4d-4593-461a-9096-f81674b136b7" (UID: "d1c22b4d-4593-461a-9096-f81674b136b7"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.328566 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-config-data" (OuterVolumeSpecName: "config-data") pod "d1c22b4d-4593-461a-9096-f81674b136b7" (UID: "d1c22b4d-4593-461a-9096-f81674b136b7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.367743 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1c22b4d-4593-461a-9096-f81674b136b7" (UID: "d1c22b4d-4593-461a-9096-f81674b136b7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.396825 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgpjc\" (UniqueName: \"kubernetes.io/projected/d1c22b4d-4593-461a-9096-f81674b136b7-kube-api-access-kgpjc\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.396872 4691 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.396884 4691 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-credential-keys\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.396894 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.396905 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.396916 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1c22b4d-4593-461a-9096-f81674b136b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.726020 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-tx7zx" event={"ID":"d1c22b4d-4593-461a-9096-f81674b136b7","Type":"ContainerDied","Data":"8ea1acb5b1b4ccfc1d41c2d027dee9f2f64aef7bb56ce4940e41f2a93bbc4b59"}
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.726396 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ea1acb5b1b4ccfc1d41c2d027dee9f2f64aef7bb56ce4940e41f2a93bbc4b59"
Nov 24 08:13:56 crc kubenswrapper[4691]: I1124 08:13:56.726065 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-tx7zx"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.211626 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b7b667979-bqjww"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.228328 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-54fc9d9c65-98hdh"]
Nov 24 08:13:57 crc kubenswrapper[4691]: E1124 08:13:57.228763 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1c22b4d-4593-461a-9096-f81674b136b7" containerName="keystone-bootstrap"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.228781 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1c22b4d-4593-461a-9096-f81674b136b7" containerName="keystone-bootstrap"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.228998 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1c22b4d-4593-461a-9096-f81674b136b7" containerName="keystone-bootstrap"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.229623 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.233888 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.234053 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.234155 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.234223 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rw8tv"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.234300 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.234404 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.257531 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-54fc9d9c65-98hdh"]
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.334591 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-54t6g"]
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.335437 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" podUID="70e09fe5-1d59-4585-a97c-0fac3f622b07" containerName="dnsmasq-dns" containerID="cri-o://e519ccf8e2d5980411184dae79d01bd5ddc9ed046bc2227cb27ec45e4040059e" gracePeriod=10
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.419396 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-internal-tls-certs\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.419479 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-public-tls-certs\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.419504 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnwv2\" (UniqueName: \"kubernetes.io/projected/75e3a295-29f6-49d4-91d5-c6bf791eebdd-kube-api-access-qnwv2\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.419531 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-fernet-keys\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.419588 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-config-data\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.419654 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-credential-keys\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.419689 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-combined-ca-bundle\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.419725 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-scripts\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.521646 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-credential-keys\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.521714 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-combined-ca-bundle\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.521748 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-scripts\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.521794 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-internal-tls-certs\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.521818 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-public-tls-certs\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.521837 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnwv2\" (UniqueName: \"kubernetes.io/projected/75e3a295-29f6-49d4-91d5-c6bf791eebdd-kube-api-access-qnwv2\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.521865 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-fernet-keys\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.521908 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-config-data\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.532148 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-fernet-keys\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.532913 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-scripts\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.533401 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-public-tls-certs\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.533886 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-internal-tls-certs\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.535431 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-combined-ca-bundle\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.536108 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-config-data\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.541490 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnwv2\" (UniqueName: \"kubernetes.io/projected/75e3a295-29f6-49d4-91d5-c6bf791eebdd-kube-api-access-qnwv2\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.559380 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/75e3a295-29f6-49d4-91d5-c6bf791eebdd-credential-keys\") pod \"keystone-54fc9d9c65-98hdh\" (UID: \"75e3a295-29f6-49d4-91d5-c6bf791eebdd\") " pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.738689 4691 generic.go:334] "Generic (PLEG): container finished" podID="70e09fe5-1d59-4585-a97c-0fac3f622b07" containerID="e519ccf8e2d5980411184dae79d01bd5ddc9ed046bc2227cb27ec45e4040059e" exitCode=0
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.738739 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" event={"ID":"70e09fe5-1d59-4585-a97c-0fac3f622b07","Type":"ContainerDied","Data":"e519ccf8e2d5980411184dae79d01bd5ddc9ed046bc2227cb27ec45e4040059e"}
Nov 24 08:13:57 crc kubenswrapper[4691]: I1124 08:13:57.855258 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.473587 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-qvhpm"
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.503923 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-8g8rz"
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.645156 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/faf5645f-a25c-4bde-9769-51e1681b7eba-logs\") pod \"faf5645f-a25c-4bde-9769-51e1681b7eba\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") "
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.645261 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-db-sync-config-data\") pod \"09239709-f618-437f-a720-070aff572294\" (UID: \"09239709-f618-437f-a720-070aff572294\") "
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.645326 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjbwv\" (UniqueName: \"kubernetes.io/projected/09239709-f618-437f-a720-070aff572294-kube-api-access-pjbwv\") pod \"09239709-f618-437f-a720-070aff572294\" (UID: \"09239709-f618-437f-a720-070aff572294\") "
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.645363 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-combined-ca-bundle\") pod \"09239709-f618-437f-a720-070aff572294\" (UID: \"09239709-f618-437f-a720-070aff572294\") "
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.645410 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-config-data\") pod \"faf5645f-a25c-4bde-9769-51e1681b7eba\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") "
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.645516 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wf86m\" (UniqueName: \"kubernetes.io/projected/faf5645f-a25c-4bde-9769-51e1681b7eba-kube-api-access-wf86m\") pod \"faf5645f-a25c-4bde-9769-51e1681b7eba\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") "
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.645601 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-combined-ca-bundle\") pod \"faf5645f-a25c-4bde-9769-51e1681b7eba\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") "
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.645692 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-scripts\") pod \"faf5645f-a25c-4bde-9769-51e1681b7eba\" (UID: \"faf5645f-a25c-4bde-9769-51e1681b7eba\") "
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.646814 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/faf5645f-a25c-4bde-9769-51e1681b7eba-logs" (OuterVolumeSpecName: "logs") pod "faf5645f-a25c-4bde-9769-51e1681b7eba" (UID: "faf5645f-a25c-4bde-9769-51e1681b7eba"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.668623 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09239709-f618-437f-a720-070aff572294-kube-api-access-pjbwv" (OuterVolumeSpecName: "kube-api-access-pjbwv") pod "09239709-f618-437f-a720-070aff572294" (UID: "09239709-f618-437f-a720-070aff572294"). InnerVolumeSpecName "kube-api-access-pjbwv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.674833 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "09239709-f618-437f-a720-070aff572294" (UID: "09239709-f618-437f-a720-070aff572294"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.675491 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/faf5645f-a25c-4bde-9769-51e1681b7eba-kube-api-access-wf86m" (OuterVolumeSpecName: "kube-api-access-wf86m") pod "faf5645f-a25c-4bde-9769-51e1681b7eba" (UID: "faf5645f-a25c-4bde-9769-51e1681b7eba"). InnerVolumeSpecName "kube-api-access-wf86m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.676722 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-scripts" (OuterVolumeSpecName: "scripts") pod "faf5645f-a25c-4bde-9769-51e1681b7eba" (UID: "faf5645f-a25c-4bde-9769-51e1681b7eba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.679297 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09239709-f618-437f-a720-070aff572294" (UID: "09239709-f618-437f-a720-070aff572294"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.706565 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-config-data" (OuterVolumeSpecName: "config-data") pod "faf5645f-a25c-4bde-9769-51e1681b7eba" (UID: "faf5645f-a25c-4bde-9769-51e1681b7eba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.720618 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "faf5645f-a25c-4bde-9769-51e1681b7eba" (UID: "faf5645f-a25c-4bde-9769-51e1681b7eba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.747499 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjbwv\" (UniqueName: \"kubernetes.io/projected/09239709-f618-437f-a720-070aff572294-kube-api-access-pjbwv\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.747538 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.747568 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.747581 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wf86m\" (UniqueName: \"kubernetes.io/projected/faf5645f-a25c-4bde-9769-51e1681b7eba-kube-api-access-wf86m\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.747592 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.747600 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/faf5645f-a25c-4bde-9769-51e1681b7eba-scripts\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.747610 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/faf5645f-a25c-4bde-9769-51e1681b7eba-logs\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.747618 4691 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/09239709-f618-437f-a720-070aff572294-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.781548 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-8g8rz"
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.783767 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-8g8rz" event={"ID":"faf5645f-a25c-4bde-9769-51e1681b7eba","Type":"ContainerDied","Data":"9334e8165dca42cd8a11e62bdc02a5dca97951253da38efa802c0f7707382626"}
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.783805 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9334e8165dca42cd8a11e62bdc02a5dca97951253da38efa802c0f7707382626"
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.814814 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ad1b6c3-36a5-4991-b30f-092d3bf5018b","Type":"ContainerStarted","Data":"d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a"}
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.820905 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-qvhpm" event={"ID":"09239709-f618-437f-a720-070aff572294","Type":"ContainerDied","Data":"b89cd7b6c28b3ca1e6bb31428ceb318dd3850a0c140d36323019e096ccc6e1c2"}
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.820950 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b89cd7b6c28b3ca1e6bb31428ceb318dd3850a0c140d36323019e096ccc6e1c2"
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.821014 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-qvhpm"
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.867266 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g"
Nov 24 08:13:58 crc kubenswrapper[4691]: I1124 08:13:58.944648 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-54fc9d9c65-98hdh"]
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.051776 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-config\") pod \"70e09fe5-1d59-4585-a97c-0fac3f622b07\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") "
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.051935 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-swift-storage-0\") pod \"70e09fe5-1d59-4585-a97c-0fac3f622b07\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") "
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.052024 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-nb\") pod \"70e09fe5-1d59-4585-a97c-0fac3f622b07\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") "
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.052097 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86j9p\" (UniqueName: \"kubernetes.io/projected/70e09fe5-1d59-4585-a97c-0fac3f622b07-kube-api-access-86j9p\") pod \"70e09fe5-1d59-4585-a97c-0fac3f622b07\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") "
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.052151 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-sb\") pod \"70e09fe5-1d59-4585-a97c-0fac3f622b07\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") "
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.052202 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-svc\") pod \"70e09fe5-1d59-4585-a97c-0fac3f622b07\" (UID: \"70e09fe5-1d59-4585-a97c-0fac3f622b07\") "
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.058604 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70e09fe5-1d59-4585-a97c-0fac3f622b07-kube-api-access-86j9p" (OuterVolumeSpecName: "kube-api-access-86j9p") pod "70e09fe5-1d59-4585-a97c-0fac3f622b07" (UID: "70e09fe5-1d59-4585-a97c-0fac3f622b07"). InnerVolumeSpecName "kube-api-access-86j9p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.122182 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "70e09fe5-1d59-4585-a97c-0fac3f622b07" (UID: "70e09fe5-1d59-4585-a97c-0fac3f622b07"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.126625 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "70e09fe5-1d59-4585-a97c-0fac3f622b07" (UID: "70e09fe5-1d59-4585-a97c-0fac3f622b07"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.126961 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-config" (OuterVolumeSpecName: "config") pod "70e09fe5-1d59-4585-a97c-0fac3f622b07" (UID: "70e09fe5-1d59-4585-a97c-0fac3f622b07"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.128866 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "70e09fe5-1d59-4585-a97c-0fac3f622b07" (UID: "70e09fe5-1d59-4585-a97c-0fac3f622b07"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.145698 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "70e09fe5-1d59-4585-a97c-0fac3f622b07" (UID: "70e09fe5-1d59-4585-a97c-0fac3f622b07"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.154262 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-config\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.154305 4691 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.154317 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.154326 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86j9p\" (UniqueName: \"kubernetes.io/projected/70e09fe5-1d59-4585-a97c-0fac3f622b07-kube-api-access-86j9p\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.154336 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.154344 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/70e09fe5-1d59-4585-a97c-0fac3f622b07-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.633480 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-77fc7f8568-9mx5z"]
Nov 24 08:13:59 crc kubenswrapper[4691]: E1124 08:13:59.634224 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="faf5645f-a25c-4bde-9769-51e1681b7eba" containerName="placement-db-sync"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.634237 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="faf5645f-a25c-4bde-9769-51e1681b7eba" containerName="placement-db-sync"
Nov 24 08:13:59 crc kubenswrapper[4691]: E1124 08:13:59.634255 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70e09fe5-1d59-4585-a97c-0fac3f622b07" containerName="init"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.634263 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="70e09fe5-1d59-4585-a97c-0fac3f622b07" containerName="init"
Nov 24 08:13:59 crc kubenswrapper[4691]: E1124 08:13:59.634275 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09239709-f618-437f-a720-070aff572294" containerName="barbican-db-sync"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.634282 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="09239709-f618-437f-a720-070aff572294" containerName="barbican-db-sync"
Nov 24 08:13:59 crc kubenswrapper[4691]: E1124 08:13:59.634305 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70e09fe5-1d59-4585-a97c-0fac3f622b07" containerName="dnsmasq-dns"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.634310 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="70e09fe5-1d59-4585-a97c-0fac3f622b07" containerName="dnsmasq-dns"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.634545 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="faf5645f-a25c-4bde-9769-51e1681b7eba" containerName="placement-db-sync"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.634565 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="70e09fe5-1d59-4585-a97c-0fac3f622b07" containerName="dnsmasq-dns"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.634580 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="09239709-f618-437f-a720-070aff572294" containerName="barbican-db-sync"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.635652 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.642671 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-77fc7f8568-9mx5z"]
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.651438 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.651721 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.651919 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.652025 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.652084 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-n869k"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.773998 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4dml\" (UniqueName: \"kubernetes.io/projected/acaed1f5-7a77-46a1-936d-e0fa2a02767b-kube-api-access-f4dml\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.774436 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-config-data\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.774584 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-scripts\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.774680 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/acaed1f5-7a77-46a1-936d-e0fa2a02767b-logs\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.774980 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-public-tls-certs\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.777209 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-combined-ca-bundle\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.777400 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-internal-tls-certs\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.820609 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-bcd6fbf67-bnwn2"]
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.824817 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-bcd6fbf67-bnwn2"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.845202 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-z7x2p"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.845276 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.853086 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6866b57cd6-xcpbl"]
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.860010 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.870610 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.880624 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-internal-tls-certs\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.880753 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4dml\" (UniqueName: \"kubernetes.io/projected/acaed1f5-7a77-46a1-936d-e0fa2a02767b-kube-api-access-f4dml\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.880773 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-config-data\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.880790 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-scripts\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.880808 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/acaed1f5-7a77-46a1-936d-e0fa2a02767b-logs\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.880914 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-public-tls-certs\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.880987 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-combined-ca-bundle\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.883757 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.884353 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-bcd6fbf67-bnwn2"]
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.886049 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/acaed1f5-7a77-46a1-936d-e0fa2a02767b-logs\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.901842 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-54fc9d9c65-98hdh" event={"ID":"75e3a295-29f6-49d4-91d5-c6bf791eebdd","Type":"ContainerStarted","Data":"0fb9506acc3ce214dc3fff53f5d9de61cbdb99cbf558e88cb8a8d4357f8356a7"}
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.901931 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-54fc9d9c65-98hdh" event={"ID":"75e3a295-29f6-49d4-91d5-c6bf791eebdd","Type":"ContainerStarted","Data":"18bbe70d4edd3444098f7906fed380ea8cdf6d03443537edb788244ed1cbcdc0"}
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.902924 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-54fc9d9c65-98hdh"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.920936 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6866b57cd6-xcpbl"]
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.922937 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-scripts\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.923684 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-config-data\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.924204 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-public-tls-certs\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.929994 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-internal-tls-certs\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.934193 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4dml\" (UniqueName: \"kubernetes.io/projected/acaed1f5-7a77-46a1-936d-e0fa2a02767b-kube-api-access-f4dml\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.950253 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acaed1f5-7a77-46a1-936d-e0fa2a02767b-combined-ca-bundle\") pod \"placement-77fc7f8568-9mx5z\" (UID: \"acaed1f5-7a77-46a1-936d-e0fa2a02767b\") " pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.962204 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.962251 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.970628 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-77fc7f8568-9mx5z"
Nov 24 08:13:59 crc kubenswrapper[4691]: I1124 08:13:59.999821 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e39f7b55-5583-421f-a817-bae68533b497-config-data-custom\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2"
Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.002671 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l45p\" (UniqueName: \"kubernetes.io/projected/750147cd-32ed-4f3d-83e5-96798011bf10-kube-api-access-7l45p\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl"
Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.002824 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e39f7b55-5583-421f-a817-bae68533b497-combined-ca-bundle\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2"
Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.002926 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/750147cd-32ed-4f3d-83e5-96798011bf10-config-data\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl"
Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.003026 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/750147cd-32ed-4f3d-83e5-96798011bf10-config-data-custom\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl"
Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.003169 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e39f7b55-5583-421f-a817-bae68533b497-config-data\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2"
Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.003318 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/750147cd-32ed-4f3d-83e5-96798011bf10-logs\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl"
Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.021765 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/750147cd-32ed-4f3d-83e5-96798011bf10-combined-ca-bundle\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") "
pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.022046 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssvbx\" (UniqueName: \"kubernetes.io/projected/e39f7b55-5583-421f-a817-bae68533b497-kube-api-access-ssvbx\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.022137 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e39f7b55-5583-421f-a817-bae68533b497-logs\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.056891 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" event={"ID":"70e09fe5-1d59-4585-a97c-0fac3f622b07","Type":"ContainerDied","Data":"348dd04aa3da8c825c94bcb059dc6b82613762fc10866eab9fae798582c8c65f"} Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.056946 4691 scope.go:117] "RemoveContainer" containerID="e519ccf8e2d5980411184dae79d01bd5ddc9ed046bc2227cb27ec45e4040059e" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.057115 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-54t6g" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.060134 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-2xhk7"] Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.067860 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.128649 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/750147cd-32ed-4f3d-83e5-96798011bf10-combined-ca-bundle\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.129007 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssvbx\" (UniqueName: \"kubernetes.io/projected/e39f7b55-5583-421f-a817-bae68533b497-kube-api-access-ssvbx\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.129037 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e39f7b55-5583-421f-a817-bae68533b497-logs\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.129077 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e39f7b55-5583-421f-a817-bae68533b497-config-data-custom\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.129145 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l45p\" (UniqueName: \"kubernetes.io/projected/750147cd-32ed-4f3d-83e5-96798011bf10-kube-api-access-7l45p\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.129161 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e39f7b55-5583-421f-a817-bae68533b497-combined-ca-bundle\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.129180 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/750147cd-32ed-4f3d-83e5-96798011bf10-config-data\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.129199 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/750147cd-32ed-4f3d-83e5-96798011bf10-config-data-custom\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.129249 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e39f7b55-5583-421f-a817-bae68533b497-config-data\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.129327 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/750147cd-32ed-4f3d-83e5-96798011bf10-logs\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.146602 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/750147cd-32ed-4f3d-83e5-96798011bf10-logs\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.147130 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e39f7b55-5583-421f-a817-bae68533b497-combined-ca-bundle\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.147584 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e39f7b55-5583-421f-a817-bae68533b497-logs\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.148864 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e39f7b55-5583-421f-a817-bae68533b497-config-data\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.149950 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l45p\" (UniqueName: \"kubernetes.io/projected/750147cd-32ed-4f3d-83e5-96798011bf10-kube-api-access-7l45p\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.156376 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e39f7b55-5583-421f-a817-bae68533b497-config-data-custom\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.157110 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/750147cd-32ed-4f3d-83e5-96798011bf10-combined-ca-bundle\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.160114 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/750147cd-32ed-4f3d-83e5-96798011bf10-config-data\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.160146 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-2xhk7"] Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.166075 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/750147cd-32ed-4f3d-83e5-96798011bf10-config-data-custom\") pod \"barbican-keystone-listener-6866b57cd6-xcpbl\" (UID: \"750147cd-32ed-4f3d-83e5-96798011bf10\") " pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.193808 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssvbx\" (UniqueName: \"kubernetes.io/projected/e39f7b55-5583-421f-a817-bae68533b497-kube-api-access-ssvbx\") pod \"barbican-worker-bcd6fbf67-bnwn2\" (UID: \"e39f7b55-5583-421f-a817-bae68533b497\") " pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.199941 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-54fc9d9c65-98hdh" podStartSLOduration=3.199917879 podStartE2EDuration="3.199917879s" podCreationTimestamp="2025-11-24 08:13:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:00.012048935 +0000 UTC m=+1002.010998194" watchObservedRunningTime="2025-11-24 08:14:00.199917879 +0000 UTC m=+1002.198867128" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.213133 4691 scope.go:117] "RemoveContainer" containerID="e49eaeb23f880e45af73af17652fcc892ae3e7db8c73c79bbe6f0c48adf99b99" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.224670 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-54t6g"] Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.231734 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.236009 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7bvv\" (UniqueName: \"kubernetes.io/projected/aad353e5-274f-4f13-b270-f8bf589fbc3e-kube-api-access-t7bvv\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.236100 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.236327 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-config\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.236481 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.236562 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.236754 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-54t6g"] Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.252251 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.255506 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-66c5785d58-5hgjb"] Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.257020 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.261262 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.267787 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.282463 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-66c5785d58-5hgjb"] Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.340503 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7bvv\" (UniqueName: \"kubernetes.io/projected/aad353e5-274f-4f13-b270-f8bf589fbc3e-kube-api-access-t7bvv\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.343755 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.343843 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-config\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.343971 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.344067 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.344217 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.348190 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.348423 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-config\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.348826 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.349137 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.349517 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.363088 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7bvv\" (UniqueName: \"kubernetes.io/projected/aad353e5-274f-4f13-b270-f8bf589fbc3e-kube-api-access-t7bvv\") pod \"dnsmasq-dns-848cf88cfc-2xhk7\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.427905 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.448511 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvhx6\" (UniqueName: \"kubernetes.io/projected/ffa5629b-a725-48dd-a0ed-7b5d3b481189-kube-api-access-pvhx6\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.448569 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data-custom\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.448592 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffa5629b-a725-48dd-a0ed-7b5d3b481189-logs\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.448622 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.448639 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-combined-ca-bundle\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.477016 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-bcd6fbf67-bnwn2" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.477926 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.550636 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvhx6\" (UniqueName: \"kubernetes.io/projected/ffa5629b-a725-48dd-a0ed-7b5d3b481189-kube-api-access-pvhx6\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.550695 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data-custom\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.550726 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffa5629b-a725-48dd-a0ed-7b5d3b481189-logs\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.550758 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.550777 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-combined-ca-bundle\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.554728 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffa5629b-a725-48dd-a0ed-7b5d3b481189-logs\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.576425 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data-custom\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.576916 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.584410 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-combined-ca-bundle\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc 
kubenswrapper[4691]: I1124 08:14:00.595653 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvhx6\" (UniqueName: \"kubernetes.io/projected/ffa5629b-a725-48dd-a0ed-7b5d3b481189-kube-api-access-pvhx6\") pod \"barbican-api-66c5785d58-5hgjb\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.600986 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.695234 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-77fc7f8568-9mx5z"] Nov 24 08:14:00 crc kubenswrapper[4691]: I1124 08:14:00.820376 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70e09fe5-1d59-4585-a97c-0fac3f622b07" path="/var/lib/kubelet/pods/70e09fe5-1d59-4585-a97c-0fac3f622b07/volumes" Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.125619 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-77fc7f8568-9mx5z" event={"ID":"acaed1f5-7a77-46a1-936d-e0fa2a02767b","Type":"ContainerStarted","Data":"aaa02bb29db1e5669257515e9d89a2ecdceec2d586a6df6529a4da45bac4ce29"} Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.137206 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vsrzv" event={"ID":"29ff644c-aef6-4092-9dcf-1b4562e662d4","Type":"ContainerStarted","Data":"f47dc7fc96bcd12e987f996a20a019c09ad4dc94d54b28f91317c714fd6479dc"} Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.142747 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.142777 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.211128 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-vsrzv" podStartSLOduration=4.719516292 podStartE2EDuration="47.211106715s" podCreationTimestamp="2025-11-24 08:13:14 +0000 UTC" firstStartedPulling="2025-11-24 08:13:16.786950404 +0000 UTC m=+958.785899663" lastFinishedPulling="2025-11-24 08:13:59.278540837 +0000 UTC m=+1001.277490086" observedRunningTime="2025-11-24 08:14:01.182251919 +0000 UTC m=+1003.181201178" watchObservedRunningTime="2025-11-24 08:14:01.211106715 +0000 UTC m=+1003.210055974" Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.214596 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.214641 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.226359 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6866b57cd6-xcpbl"] Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.255524 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-66c5785d58-5hgjb"] Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.276461 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-bcd6fbf67-bnwn2"] Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.309749 4691 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-2xhk7"] Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.327345 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:01 crc kubenswrapper[4691]: I1124 08:14:01.331686 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.183961 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66c5785d58-5hgjb" event={"ID":"ffa5629b-a725-48dd-a0ed-7b5d3b481189","Type":"ContainerStarted","Data":"f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886"} Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.184240 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66c5785d58-5hgjb" event={"ID":"ffa5629b-a725-48dd-a0ed-7b5d3b481189","Type":"ContainerStarted","Data":"a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b"} Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.184252 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66c5785d58-5hgjb" event={"ID":"ffa5629b-a725-48dd-a0ed-7b5d3b481189","Type":"ContainerStarted","Data":"eb08d1a76a3eda79286f5a0fb0ba0b33527decf4f7ce01bffdb9b6e5a667eb31"} Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.184623 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.184698 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.188080 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" event={"ID":"750147cd-32ed-4f3d-83e5-96798011bf10","Type":"ContainerStarted","Data":"27e243797e0e97daadea8736eda6da8dc9c0b5ff1995b2521151b2689426efda"} Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.193631 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-77fc7f8568-9mx5z" event={"ID":"acaed1f5-7a77-46a1-936d-e0fa2a02767b","Type":"ContainerStarted","Data":"c56708b2eab4f630edd24793c7a211584d5e5d10febd4f2ac883a07de9be154f"} Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.193665 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-77fc7f8568-9mx5z" event={"ID":"acaed1f5-7a77-46a1-936d-e0fa2a02767b","Type":"ContainerStarted","Data":"ab5e973a20b74738f37f0ee310a72a01d7d7c5fcd8e654ef650fc2f5204d5378"} Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.194441 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-77fc7f8568-9mx5z" Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.194481 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-77fc7f8568-9mx5z" Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.201162 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bcd6fbf67-bnwn2" event={"ID":"e39f7b55-5583-421f-a817-bae68533b497","Type":"ContainerStarted","Data":"e7f3973ac9c6a1c2d8e0c67e8d964bbe23b3e42896e9120a63207dc3371bc485"} Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.207818 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-66c5785d58-5hgjb" 
podStartSLOduration=2.20780087 podStartE2EDuration="2.20780087s" podCreationTimestamp="2025-11-24 08:14:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:02.204532415 +0000 UTC m=+1004.203481664" watchObservedRunningTime="2025-11-24 08:14:02.20780087 +0000 UTC m=+1004.206750119" Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.219321 4691 generic.go:334] "Generic (PLEG): container finished" podID="aad353e5-274f-4f13-b270-f8bf589fbc3e" containerID="0fcfbe5f674a7d4e98fda2a00e457012ef2e94ab1c0babfff16f6aba2061d4b2" exitCode=0 Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.219578 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" event={"ID":"aad353e5-274f-4f13-b270-f8bf589fbc3e","Type":"ContainerDied","Data":"0fcfbe5f674a7d4e98fda2a00e457012ef2e94ab1c0babfff16f6aba2061d4b2"} Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.219640 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" event={"ID":"aad353e5-274f-4f13-b270-f8bf589fbc3e","Type":"ContainerStarted","Data":"11a14fd715095cf6a3cbe57d26d07bd04c83863f8711565bef2b686328eaba7d"} Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.220797 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.220827 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:02 crc kubenswrapper[4691]: I1124 08:14:02.241349 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-77fc7f8568-9mx5z" podStartSLOduration=3.241324412 podStartE2EDuration="3.241324412s" podCreationTimestamp="2025-11-24 08:13:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:02.229950222 +0000 UTC m=+1004.228899481" watchObservedRunningTime="2025-11-24 08:14:02.241324412 +0000 UTC m=+1004.240273671" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.260516 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" event={"ID":"aad353e5-274f-4f13-b270-f8bf589fbc3e","Type":"ContainerStarted","Data":"9105d6b4dce93a0f052aa110345eaf210644f8949f831f138fe7e86f269aff90"} Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.261700 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.261721 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.262518 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.290593 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" podStartSLOduration=4.290574649 podStartE2EDuration="4.290574649s" podCreationTimestamp="2025-11-24 08:13:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:03.288220701 +0000 UTC m=+1005.287169970" watchObservedRunningTime="2025-11-24 08:14:03.290574649 +0000 UTC m=+1005.289523898" Nov 24 08:14:03 
crc kubenswrapper[4691]: I1124 08:14:03.411253 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7d7c46cd68-xl465"] Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.413035 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.419965 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.420152 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.450635 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7d7c46cd68-xl465"] Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.533009 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f904b6e6-711f-4edd-bdaf-1eeca5979318-logs\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.533104 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-config-data\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.533168 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-public-tls-certs\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.533248 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-config-data-custom\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.533275 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr4dn\" (UniqueName: \"kubernetes.io/projected/f904b6e6-711f-4edd-bdaf-1eeca5979318-kube-api-access-nr4dn\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.533343 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-internal-tls-certs\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.533380 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-combined-ca-bundle\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.635105 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-public-tls-certs\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.635158 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-config-data-custom\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.635195 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr4dn\" (UniqueName: \"kubernetes.io/projected/f904b6e6-711f-4edd-bdaf-1eeca5979318-kube-api-access-nr4dn\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.635278 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-internal-tls-certs\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.635311 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-combined-ca-bundle\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.635355 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f904b6e6-711f-4edd-bdaf-1eeca5979318-logs\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.635425 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-config-data\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.636315 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f904b6e6-711f-4edd-bdaf-1eeca5979318-logs\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.647225 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-config-data-custom\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.664288 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-combined-ca-bundle\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.664352 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-internal-tls-certs\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.664524 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-config-data\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.664761 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f904b6e6-711f-4edd-bdaf-1eeca5979318-public-tls-certs\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.668177 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr4dn\" (UniqueName: \"kubernetes.io/projected/f904b6e6-711f-4edd-bdaf-1eeca5979318-kube-api-access-nr4dn\") pod \"barbican-api-7d7c46cd68-xl465\" (UID: \"f904b6e6-711f-4edd-bdaf-1eeca5979318\") " pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.738060 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:03 crc kubenswrapper[4691]: I1124 08:14:03.977366 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 24 08:14:04 crc kubenswrapper[4691]: I1124 08:14:04.100946 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77477f4d7b-kclfz" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Nov 24 08:14:04 crc kubenswrapper[4691]: I1124 08:14:04.215779 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fb4677cdd-69rb6" podUID="5f7435d6-aa83-41a0-b392-b06d77f53aa2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.153:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.153:8443: connect: connection refused" Nov 24 08:14:04 crc kubenswrapper[4691]: I1124 08:14:04.291783 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 08:14:04 crc kubenswrapper[4691]: I1124 08:14:04.291810 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 08:14:04 crc kubenswrapper[4691]: I1124 08:14:04.291819 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 08:14:04 crc kubenswrapper[4691]: I1124 08:14:04.844728 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 24 08:14:05 crc kubenswrapper[4691]: I1124 08:14:05.194275 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:05 crc kubenswrapper[4691]: I1124 08:14:05.197282 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:05 crc kubenswrapper[4691]: I1124 08:14:05.626755 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7d7c46cd68-xl465"] Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.314097 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7d7c46cd68-xl465" event={"ID":"f904b6e6-711f-4edd-bdaf-1eeca5979318","Type":"ContainerStarted","Data":"8502273d78d09361a54d971390c18d904a2f355677a41fe83c3be30ec466b3ae"} Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.314572 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7d7c46cd68-xl465" event={"ID":"f904b6e6-711f-4edd-bdaf-1eeca5979318","Type":"ContainerStarted","Data":"964f1fb5b7d6d60c1ad15e06fcd8d5c5bd551c1bfa0ebc55a743b8546bdbf2f0"} Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.314592 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7d7c46cd68-xl465" event={"ID":"f904b6e6-711f-4edd-bdaf-1eeca5979318","Type":"ContainerStarted","Data":"3bc14813f80b08494159917c48e2921bd1bbafe83c44c7e62573dcb1b0bdd3b9"} Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.314617 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.325092 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" 
event={"ID":"750147cd-32ed-4f3d-83e5-96798011bf10","Type":"ContainerStarted","Data":"40f285fb8b5256f8e9a113ed320c38a54bd1955644187d0d394d53561ed52602"} Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.325161 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" event={"ID":"750147cd-32ed-4f3d-83e5-96798011bf10","Type":"ContainerStarted","Data":"9dfc2911bae7b66f2a13dadcd3fbeba403b21906dc3f64d19079efee49b9271f"} Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.332888 4691 generic.go:334] "Generic (PLEG): container finished" podID="29ff644c-aef6-4092-9dcf-1b4562e662d4" containerID="f47dc7fc96bcd12e987f996a20a019c09ad4dc94d54b28f91317c714fd6479dc" exitCode=0 Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.333086 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vsrzv" event={"ID":"29ff644c-aef6-4092-9dcf-1b4562e662d4","Type":"ContainerDied","Data":"f47dc7fc96bcd12e987f996a20a019c09ad4dc94d54b28f91317c714fd6479dc"} Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.342168 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bcd6fbf67-bnwn2" event={"ID":"e39f7b55-5583-421f-a817-bae68533b497","Type":"ContainerStarted","Data":"1a353e5eb2d420270d7d947b04b1181ec73f83d440886eb301b51c39bee1bc17"} Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.342236 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bcd6fbf67-bnwn2" event={"ID":"e39f7b55-5583-421f-a817-bae68533b497","Type":"ContainerStarted","Data":"320f7da270f04233ea575021f33cf00bc022a2c82af0a25c8a19d9e93d89368c"} Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.386603 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7d7c46cd68-xl465" podStartSLOduration=3.386582476 podStartE2EDuration="3.386582476s" podCreationTimestamp="2025-11-24 08:14:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:06.358425539 +0000 UTC m=+1008.357374788" watchObservedRunningTime="2025-11-24 08:14:06.386582476 +0000 UTC m=+1008.385531715" Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.405027 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-bcd6fbf67-bnwn2" podStartSLOduration=3.674918798 podStartE2EDuration="7.405005149s" podCreationTimestamp="2025-11-24 08:13:59 +0000 UTC" firstStartedPulling="2025-11-24 08:14:01.306685505 +0000 UTC m=+1003.305634754" lastFinishedPulling="2025-11-24 08:14:05.036771856 +0000 UTC m=+1007.035721105" observedRunningTime="2025-11-24 08:14:06.40020992 +0000 UTC m=+1008.399159169" watchObservedRunningTime="2025-11-24 08:14:06.405005149 +0000 UTC m=+1008.403954398" Nov 24 08:14:06 crc kubenswrapper[4691]: I1124 08:14:06.438169 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6866b57cd6-xcpbl" podStartSLOduration=3.663068263 podStartE2EDuration="7.438146479s" podCreationTimestamp="2025-11-24 08:13:59 +0000 UTC" firstStartedPulling="2025-11-24 08:14:01.258361434 +0000 UTC m=+1003.257310683" lastFinishedPulling="2025-11-24 08:14:05.03343965 +0000 UTC m=+1007.032388899" observedRunningTime="2025-11-24 08:14:06.42783809 +0000 UTC m=+1008.426787339" watchObservedRunningTime="2025-11-24 08:14:06.438146479 +0000 UTC m=+1008.437095728" Nov 24 08:14:07 crc 
kubenswrapper[4691]: I1124 08:14:07.349600 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.921924 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.934664 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-scripts\") pod \"29ff644c-aef6-4092-9dcf-1b4562e662d4\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.934852 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-db-sync-config-data\") pod \"29ff644c-aef6-4092-9dcf-1b4562e662d4\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.934905 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/29ff644c-aef6-4092-9dcf-1b4562e662d4-etc-machine-id\") pod \"29ff644c-aef6-4092-9dcf-1b4562e662d4\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.935004 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-config-data\") pod \"29ff644c-aef6-4092-9dcf-1b4562e662d4\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.935026 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4j42\" (UniqueName: \"kubernetes.io/projected/29ff644c-aef6-4092-9dcf-1b4562e662d4-kube-api-access-x4j42\") pod \"29ff644c-aef6-4092-9dcf-1b4562e662d4\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.935270 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/29ff644c-aef6-4092-9dcf-1b4562e662d4-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "29ff644c-aef6-4092-9dcf-1b4562e662d4" (UID: "29ff644c-aef6-4092-9dcf-1b4562e662d4"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.936229 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-combined-ca-bundle\") pod \"29ff644c-aef6-4092-9dcf-1b4562e662d4\" (UID: \"29ff644c-aef6-4092-9dcf-1b4562e662d4\") " Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.937148 4691 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/29ff644c-aef6-4092-9dcf-1b4562e662d4-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.943773 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "29ff644c-aef6-4092-9dcf-1b4562e662d4" (UID: "29ff644c-aef6-4092-9dcf-1b4562e662d4"). 
InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.957963 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29ff644c-aef6-4092-9dcf-1b4562e662d4-kube-api-access-x4j42" (OuterVolumeSpecName: "kube-api-access-x4j42") pod "29ff644c-aef6-4092-9dcf-1b4562e662d4" (UID: "29ff644c-aef6-4092-9dcf-1b4562e662d4"). InnerVolumeSpecName "kube-api-access-x4j42". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.963586 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-scripts" (OuterVolumeSpecName: "scripts") pod "29ff644c-aef6-4092-9dcf-1b4562e662d4" (UID: "29ff644c-aef6-4092-9dcf-1b4562e662d4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:07 crc kubenswrapper[4691]: I1124 08:14:07.980654 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "29ff644c-aef6-4092-9dcf-1b4562e662d4" (UID: "29ff644c-aef6-4092-9dcf-1b4562e662d4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.009700 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-config-data" (OuterVolumeSpecName: "config-data") pod "29ff644c-aef6-4092-9dcf-1b4562e662d4" (UID: "29ff644c-aef6-4092-9dcf-1b4562e662d4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.039816 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.039881 4691 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.039901 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.039919 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4j42\" (UniqueName: \"kubernetes.io/projected/29ff644c-aef6-4092-9dcf-1b4562e662d4-kube-api-access-x4j42\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.039932 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29ff644c-aef6-4092-9dcf-1b4562e662d4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.364027 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vsrzv" event={"ID":"29ff644c-aef6-4092-9dcf-1b4562e662d4","Type":"ContainerDied","Data":"c09484fa238e48a2093c19dfa3052b745691097de7c8495bb8f96e6fb0e12576"} Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.364092 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-vsrzv" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.364105 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c09484fa238e48a2093c19dfa3052b745691097de7c8495bb8f96e6fb0e12576" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.841746 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 08:14:08 crc kubenswrapper[4691]: E1124 08:14:08.842527 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29ff644c-aef6-4092-9dcf-1b4562e662d4" containerName="cinder-db-sync" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.842541 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="29ff644c-aef6-4092-9dcf-1b4562e662d4" containerName="cinder-db-sync" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.842758 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="29ff644c-aef6-4092-9dcf-1b4562e662d4" containerName="cinder-db-sync" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.843751 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.851716 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-2xhk7"] Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.853715 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8q2qr" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.853883 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.855921 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" podUID="aad353e5-274f-4f13-b270-f8bf589fbc3e" containerName="dnsmasq-dns" containerID="cri-o://9105d6b4dce93a0f052aa110345eaf210644f8949f831f138fe7e86f269aff90" gracePeriod=10 Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.853991 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.854023 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.857592 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.871371 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.871415 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-scripts\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.872778 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c6d55f9d-af21-413d-8755-dd9af8386c23-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.872886 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9dxm\" (UniqueName: \"kubernetes.io/projected/c6d55f9d-af21-413d-8755-dd9af8386c23-kube-api-access-z9dxm\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.872912 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.872972 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.888642 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.975324 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.975386 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-scripts\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.975544 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c6d55f9d-af21-413d-8755-dd9af8386c23-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.975600 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9dxm\" (UniqueName: \"kubernetes.io/projected/c6d55f9d-af21-413d-8755-dd9af8386c23-kube-api-access-z9dxm\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.975622 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.975666 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.976910 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c6d55f9d-af21-413d-8755-dd9af8386c23-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.986495 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.987284 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-combined-ca-bundle\") pod 
\"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:08 crc kubenswrapper[4691]: I1124 08:14:08.996754 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.000904 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-cbv5w"] Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.003145 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.036097 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-scripts\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.037008 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9dxm\" (UniqueName: \"kubernetes.io/projected/c6d55f9d-af21-413d-8755-dd9af8386c23-kube-api-access-z9dxm\") pod \"cinder-scheduler-0\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.095023 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.095134 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.095165 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-config\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.095187 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-svc\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.095224 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " 
pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.095344 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zqcm\" (UniqueName: \"kubernetes.io/projected/425954a5-8127-4cbe-879e-ae3124e74ee6-kube-api-access-9zqcm\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.119107 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-cbv5w"] Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.174546 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.176737 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.181107 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.203935 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.204948 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zqcm\" (UniqueName: \"kubernetes.io/projected/425954a5-8127-4cbe-879e-ae3124e74ee6-kube-api-access-9zqcm\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205060 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4n9zl\" (UniqueName: \"kubernetes.io/projected/a8f1c7da-eb94-4115-baf0-f15d335d85e0-kube-api-access-4n9zl\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205101 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-scripts\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205177 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205207 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205247 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " 
pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205275 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-config\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205295 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-svc\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205318 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205348 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205376 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data-custom\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205406 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a8f1c7da-eb94-4115-baf0-f15d335d85e0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.205434 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f1c7da-eb94-4115-baf0-f15d335d85e0-logs\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.207307 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.209008 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.211330 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: 
\"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.211342 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-config\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.211965 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-svc\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.213605 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.235539 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zqcm\" (UniqueName: \"kubernetes.io/projected/425954a5-8127-4cbe-879e-ae3124e74ee6-kube-api-access-9zqcm\") pod \"dnsmasq-dns-6578955fd5-cbv5w\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.306827 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.307318 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.307339 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data-custom\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.307367 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a8f1c7da-eb94-4115-baf0-f15d335d85e0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.307387 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f1c7da-eb94-4115-baf0-f15d335d85e0-logs\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.307445 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4n9zl\" (UniqueName: 
\"kubernetes.io/projected/a8f1c7da-eb94-4115-baf0-f15d335d85e0-kube-api-access-4n9zl\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.307486 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-scripts\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.308173 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f1c7da-eb94-4115-baf0-f15d335d85e0-logs\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.308289 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a8f1c7da-eb94-4115-baf0-f15d335d85e0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.311962 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.323545 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.326122 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-scripts\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.329918 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4n9zl\" (UniqueName: \"kubernetes.io/projected/a8f1c7da-eb94-4115-baf0-f15d335d85e0-kube-api-access-4n9zl\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.349056 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data-custom\") pod \"cinder-api-0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " pod="openstack/cinder-api-0" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.380952 4691 generic.go:334] "Generic (PLEG): container finished" podID="aad353e5-274f-4f13-b270-f8bf589fbc3e" containerID="9105d6b4dce93a0f052aa110345eaf210644f8949f831f138fe7e86f269aff90" exitCode=0 Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.380996 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" event={"ID":"aad353e5-274f-4f13-b270-f8bf589fbc3e","Type":"ContainerDied","Data":"9105d6b4dce93a0f052aa110345eaf210644f8949f831f138fe7e86f269aff90"} Nov 24 08:14:09 crc 
kubenswrapper[4691]: I1124 08:14:09.494551 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:09 crc kubenswrapper[4691]: I1124 08:14:09.508869 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 24 08:14:10 crc kubenswrapper[4691]: I1124 08:14:10.479292 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" podUID="aad353e5-274f-4f13-b270-f8bf589fbc3e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.164:5353: connect: connection refused" Nov 24 08:14:11 crc kubenswrapper[4691]: I1124 08:14:11.030618 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 24 08:14:12 crc kubenswrapper[4691]: I1124 08:14:12.471953 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:12 crc kubenswrapper[4691]: I1124 08:14:12.510529 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:14 crc kubenswrapper[4691]: I1124 08:14:14.098395 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77477f4d7b-kclfz" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Nov 24 08:14:14 crc kubenswrapper[4691]: I1124 08:14:14.214570 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fb4677cdd-69rb6" podUID="5f7435d6-aa83-41a0-b392-b06d77f53aa2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.153:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.153:8443: connect: connection refused" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.194200 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-cbv5w"] Nov 24 08:14:15 crc kubenswrapper[4691]: W1124 08:14:15.324593 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod425954a5_8127_4cbe_879e_ae3124e74ee6.slice/crio-b3b1644efc91f602b4db044c2ae11ee40e00e853563987e33b7800c728a64685 WatchSource:0}: Error finding container b3b1644efc91f602b4db044c2ae11ee40e00e853563987e33b7800c728a64685: Status 404 returned error can't find the container with id b3b1644efc91f602b4db044c2ae11ee40e00e853563987e33b7800c728a64685 Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.386046 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.477811 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.521166 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" event={"ID":"aad353e5-274f-4f13-b270-f8bf589fbc3e","Type":"ContainerDied","Data":"11a14fd715095cf6a3cbe57d26d07bd04c83863f8711565bef2b686328eaba7d"} Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.521222 4691 scope.go:117] "RemoveContainer" containerID="9105d6b4dce93a0f052aa110345eaf210644f8949f831f138fe7e86f269aff90" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.521270 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-2xhk7" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.541174 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" event={"ID":"425954a5-8127-4cbe-879e-ae3124e74ee6","Type":"ContainerStarted","Data":"b3b1644efc91f602b4db044c2ae11ee40e00e853563987e33b7800c728a64685"} Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.606747 4691 scope.go:117] "RemoveContainer" containerID="0fcfbe5f674a7d4e98fda2a00e457012ef2e94ab1c0babfff16f6aba2061d4b2" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.607261 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7d7c46cd68-xl465" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.678366 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-nb\") pod \"aad353e5-274f-4f13-b270-f8bf589fbc3e\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.678561 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-sb\") pod \"aad353e5-274f-4f13-b270-f8bf589fbc3e\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.678867 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7bvv\" (UniqueName: \"kubernetes.io/projected/aad353e5-274f-4f13-b270-f8bf589fbc3e-kube-api-access-t7bvv\") pod \"aad353e5-274f-4f13-b270-f8bf589fbc3e\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.678999 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-swift-storage-0\") pod \"aad353e5-274f-4f13-b270-f8bf589fbc3e\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.679174 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-svc\") pod \"aad353e5-274f-4f13-b270-f8bf589fbc3e\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.679850 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-config\") pod \"aad353e5-274f-4f13-b270-f8bf589fbc3e\" (UID: \"aad353e5-274f-4f13-b270-f8bf589fbc3e\") " Nov 24 
08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.691582 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-66c5785d58-5hgjb"] Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.691804 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-66c5785d58-5hgjb" podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerName="barbican-api-log" containerID="cri-o://a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b" gracePeriod=30 Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.691914 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-66c5785d58-5hgjb" podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerName="barbican-api" containerID="cri-o://f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886" gracePeriod=30 Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.699875 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-66c5785d58-5hgjb" podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.165:9311/healthcheck\": EOF" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.706762 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aad353e5-274f-4f13-b270-f8bf589fbc3e-kube-api-access-t7bvv" (OuterVolumeSpecName: "kube-api-access-t7bvv") pod "aad353e5-274f-4f13-b270-f8bf589fbc3e" (UID: "aad353e5-274f-4f13-b270-f8bf589fbc3e"). InnerVolumeSpecName "kube-api-access-t7bvv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.780425 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "aad353e5-274f-4f13-b270-f8bf589fbc3e" (UID: "aad353e5-274f-4f13-b270-f8bf589fbc3e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.782351 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.782377 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7bvv\" (UniqueName: \"kubernetes.io/projected/aad353e5-274f-4f13-b270-f8bf589fbc3e-kube-api-access-t7bvv\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.805793 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "aad353e5-274f-4f13-b270-f8bf589fbc3e" (UID: "aad353e5-274f-4f13-b270-f8bf589fbc3e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.833308 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aad353e5-274f-4f13-b270-f8bf589fbc3e" (UID: "aad353e5-274f-4f13-b270-f8bf589fbc3e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.874347 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "aad353e5-274f-4f13-b270-f8bf589fbc3e" (UID: "aad353e5-274f-4f13-b270-f8bf589fbc3e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.880150 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.885495 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.885547 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.885562 4691 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.892281 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-config" (OuterVolumeSpecName: "config") pod "aad353e5-274f-4f13-b270-f8bf589fbc3e" (UID: "aad353e5-274f-4f13-b270-f8bf589fbc3e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.892385 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 24 08:14:15 crc kubenswrapper[4691]: I1124 08:14:15.987854 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aad353e5-274f-4f13-b270-f8bf589fbc3e-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.171067 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-2xhk7"] Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.177216 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-2xhk7"] Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.567313 4691 generic.go:334] "Generic (PLEG): container finished" podID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerID="a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b" exitCode=143 Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.567401 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66c5785d58-5hgjb" event={"ID":"ffa5629b-a725-48dd-a0ed-7b5d3b481189","Type":"ContainerDied","Data":"a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b"} Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.573031 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ad1b6c3-36a5-4991-b30f-092d3bf5018b","Type":"ContainerStarted","Data":"7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9"} Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.573136 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="ceilometer-central-agent" containerID="cri-o://9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b" gracePeriod=30 Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.573188 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="proxy-httpd" containerID="cri-o://7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9" gracePeriod=30 Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.573211 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="sg-core" containerID="cri-o://d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a" gracePeriod=30 Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.573286 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.573227 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="ceilometer-notification-agent" containerID="cri-o://6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03" gracePeriod=30 Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.575501 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c6d55f9d-af21-413d-8755-dd9af8386c23","Type":"ContainerStarted","Data":"4eda5274b48f3e1f0baed00f4dda0b97fac99ff9d650ba013a4316ee3f5a9059"} Nov 24 08:14:16 crc 
kubenswrapper[4691]: I1124 08:14:16.579273 4691 generic.go:334] "Generic (PLEG): container finished" podID="425954a5-8127-4cbe-879e-ae3124e74ee6" containerID="d22530161d0a79a36cea83bcd01898027a570e91d88f4f70fc786e6911a37a67" exitCode=0 Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.579344 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" event={"ID":"425954a5-8127-4cbe-879e-ae3124e74ee6","Type":"ContainerDied","Data":"d22530161d0a79a36cea83bcd01898027a570e91d88f4f70fc786e6911a37a67"} Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.589521 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a8f1c7da-eb94-4115-baf0-f15d335d85e0","Type":"ContainerStarted","Data":"90e758c5c136a75ae1f382596cc92dde64a60c39fa6c1e43833aecb926884746"} Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.602323 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.686205786 podStartE2EDuration="1m2.602305517s" podCreationTimestamp="2025-11-24 08:13:14 +0000 UTC" firstStartedPulling="2025-11-24 08:13:16.553611372 +0000 UTC m=+958.552560621" lastFinishedPulling="2025-11-24 08:14:15.469711103 +0000 UTC m=+1017.468660352" observedRunningTime="2025-11-24 08:14:16.597713094 +0000 UTC m=+1018.596662343" watchObservedRunningTime="2025-11-24 08:14:16.602305517 +0000 UTC m=+1018.601254766" Nov 24 08:14:16 crc kubenswrapper[4691]: I1124 08:14:16.775693 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aad353e5-274f-4f13-b270-f8bf589fbc3e" path="/var/lib/kubelet/pods/aad353e5-274f-4f13-b270-f8bf589fbc3e/volumes" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.297808 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-658647c585-2tj4h" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.311334 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7fc5776b84-69d6x" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.418861 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-config-data\") pod \"3a983467-b9da-4795-8cae-f645d8e316b4\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.418991 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a983467-b9da-4795-8cae-f645d8e316b4-logs\") pod \"3a983467-b9da-4795-8cae-f645d8e316b4\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.419096 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3a983467-b9da-4795-8cae-f645d8e316b4-horizon-secret-key\") pod \"3a983467-b9da-4795-8cae-f645d8e316b4\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.419899 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a983467-b9da-4795-8cae-f645d8e316b4-logs" (OuterVolumeSpecName: "logs") pod "3a983467-b9da-4795-8cae-f645d8e316b4" (UID: "3a983467-b9da-4795-8cae-f645d8e316b4"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.420048 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fv6ph\" (UniqueName: \"kubernetes.io/projected/3a983467-b9da-4795-8cae-f645d8e316b4-kube-api-access-fv6ph\") pod \"3a983467-b9da-4795-8cae-f645d8e316b4\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.420119 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-scripts\") pod \"3a983467-b9da-4795-8cae-f645d8e316b4\" (UID: \"3a983467-b9da-4795-8cae-f645d8e316b4\") " Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.420779 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a983467-b9da-4795-8cae-f645d8e316b4-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.427211 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a983467-b9da-4795-8cae-f645d8e316b4-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "3a983467-b9da-4795-8cae-f645d8e316b4" (UID: "3a983467-b9da-4795-8cae-f645d8e316b4"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.429737 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a983467-b9da-4795-8cae-f645d8e316b4-kube-api-access-fv6ph" (OuterVolumeSpecName: "kube-api-access-fv6ph") pod "3a983467-b9da-4795-8cae-f645d8e316b4" (UID: "3a983467-b9da-4795-8cae-f645d8e316b4"). InnerVolumeSpecName "kube-api-access-fv6ph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.472945 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-scripts" (OuterVolumeSpecName: "scripts") pod "3a983467-b9da-4795-8cae-f645d8e316b4" (UID: "3a983467-b9da-4795-8cae-f645d8e316b4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.493208 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-config-data" (OuterVolumeSpecName: "config-data") pod "3a983467-b9da-4795-8cae-f645d8e316b4" (UID: "3a983467-b9da-4795-8cae-f645d8e316b4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.522296 4691 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3a983467-b9da-4795-8cae-f645d8e316b4-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.522335 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fv6ph\" (UniqueName: \"kubernetes.io/projected/3a983467-b9da-4795-8cae-f645d8e316b4-kube-api-access-fv6ph\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.522348 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.522359 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a983467-b9da-4795-8cae-f645d8e316b4-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.623910 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a8f1c7da-eb94-4115-baf0-f15d335d85e0","Type":"ContainerStarted","Data":"93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09"} Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.624426 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a8f1c7da-eb94-4115-baf0-f15d335d85e0","Type":"ContainerStarted","Data":"4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997"} Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.624689 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a8f1c7da-eb94-4115-baf0-f15d335d85e0" containerName="cinder-api-log" containerID="cri-o://93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09" gracePeriod=30 Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.625161 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.625598 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a8f1c7da-eb94-4115-baf0-f15d335d85e0" containerName="cinder-api" containerID="cri-o://4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997" gracePeriod=30 Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.632301 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c6d55f9d-af21-413d-8755-dd9af8386c23","Type":"ContainerStarted","Data":"3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387"} Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.651395 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=8.651373839 podStartE2EDuration="8.651373839s" podCreationTimestamp="2025-11-24 08:14:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:17.648864077 +0000 UTC m=+1019.647813336" watchObservedRunningTime="2025-11-24 08:14:17.651373839 +0000 UTC m=+1019.650323098" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.672783 4691 generic.go:334] "Generic (PLEG): container finished" 
podID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerID="7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9" exitCode=0 Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.672816 4691 generic.go:334] "Generic (PLEG): container finished" podID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerID="d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a" exitCode=2 Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.672825 4691 generic.go:334] "Generic (PLEG): container finished" podID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerID="9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b" exitCode=0 Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.672876 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ad1b6c3-36a5-4991-b30f-092d3bf5018b","Type":"ContainerDied","Data":"7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9"} Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.672903 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ad1b6c3-36a5-4991-b30f-092d3bf5018b","Type":"ContainerDied","Data":"d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a"} Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.672916 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ad1b6c3-36a5-4991-b30f-092d3bf5018b","Type":"ContainerDied","Data":"9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b"} Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.676515 4691 generic.go:334] "Generic (PLEG): container finished" podID="3a983467-b9da-4795-8cae-f645d8e316b4" containerID="47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d" exitCode=137 Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.676539 4691 generic.go:334] "Generic (PLEG): container finished" podID="3a983467-b9da-4795-8cae-f645d8e316b4" containerID="1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e" exitCode=137 Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.676578 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-658647c585-2tj4h" event={"ID":"3a983467-b9da-4795-8cae-f645d8e316b4","Type":"ContainerDied","Data":"47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d"} Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.676599 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-658647c585-2tj4h" event={"ID":"3a983467-b9da-4795-8cae-f645d8e316b4","Type":"ContainerDied","Data":"1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e"} Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.676609 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-658647c585-2tj4h" event={"ID":"3a983467-b9da-4795-8cae-f645d8e316b4","Type":"ContainerDied","Data":"743b7405bb72b1859aa19f915019ac8f1dd48a72a098ef986ff57fcf286accd4"} Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.676625 4691 scope.go:117] "RemoveContainer" containerID="47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.676725 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-658647c585-2tj4h" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.695649 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" event={"ID":"425954a5-8127-4cbe-879e-ae3124e74ee6","Type":"ContainerStarted","Data":"7a936e6272c238977cbd010fc647278165b67c42ef208ae2c7d79be1348f9703"} Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.697007 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.730603 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" podStartSLOduration=9.730583995 podStartE2EDuration="9.730583995s" podCreationTimestamp="2025-11-24 08:14:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:17.724612392 +0000 UTC m=+1019.723561641" watchObservedRunningTime="2025-11-24 08:14:17.730583995 +0000 UTC m=+1019.729533244" Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.897651 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-658647c585-2tj4h"] Nov 24 08:14:17 crc kubenswrapper[4691]: I1124 08:14:17.906922 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-658647c585-2tj4h"] Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.115302 4691 scope.go:117] "RemoveContainer" containerID="1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.142811 4691 scope.go:117] "RemoveContainer" containerID="47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.143332 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d\": container with ID starting with 47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d not found: ID does not exist" containerID="47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.143377 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d"} err="failed to get container status \"47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d\": rpc error: code = NotFound desc = could not find container \"47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d\": container with ID starting with 47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d not found: ID does not exist" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.143401 4691 scope.go:117] "RemoveContainer" containerID="1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.143808 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e\": container with ID starting with 1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e not found: ID does not exist" containerID="1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 
08:14:18.143840 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e"} err="failed to get container status \"1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e\": rpc error: code = NotFound desc = could not find container \"1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e\": container with ID starting with 1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e not found: ID does not exist" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.143861 4691 scope.go:117] "RemoveContainer" containerID="47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.144185 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d"} err="failed to get container status \"47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d\": rpc error: code = NotFound desc = could not find container \"47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d\": container with ID starting with 47b3ecd7f16371ae92a18c61d7953d59670620351a104f3da9f310ae7a82863d not found: ID does not exist" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.144214 4691 scope.go:117] "RemoveContainer" containerID="1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.144484 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e"} err="failed to get container status \"1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e\": rpc error: code = NotFound desc = could not find container \"1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e\": container with ID starting with 1815f52c06ff4902d485d16863d6c8c6d29fc9b9047f32a7df18aaad0eec214e not found: ID does not exist" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.283737 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.447028 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-scripts\") pod \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.447456 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-log-httpd\") pod \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.447488 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-combined-ca-bundle\") pod \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.447529 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-run-httpd\") pod \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.447583 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mf8r9\" (UniqueName: \"kubernetes.io/projected/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-kube-api-access-mf8r9\") pod \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.447637 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-config-data\") pod \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.447654 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-sg-core-conf-yaml\") pod \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\" (UID: \"3ad1b6c3-36a5-4991-b30f-092d3bf5018b\") " Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.448049 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3ad1b6c3-36a5-4991-b30f-092d3bf5018b" (UID: "3ad1b6c3-36a5-4991-b30f-092d3bf5018b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.448368 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3ad1b6c3-36a5-4991-b30f-092d3bf5018b" (UID: "3ad1b6c3-36a5-4991-b30f-092d3bf5018b"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.455615 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-scripts" (OuterVolumeSpecName: "scripts") pod "3ad1b6c3-36a5-4991-b30f-092d3bf5018b" (UID: "3ad1b6c3-36a5-4991-b30f-092d3bf5018b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.455639 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-kube-api-access-mf8r9" (OuterVolumeSpecName: "kube-api-access-mf8r9") pod "3ad1b6c3-36a5-4991-b30f-092d3bf5018b" (UID: "3ad1b6c3-36a5-4991-b30f-092d3bf5018b"). InnerVolumeSpecName "kube-api-access-mf8r9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.480243 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3ad1b6c3-36a5-4991-b30f-092d3bf5018b" (UID: "3ad1b6c3-36a5-4991-b30f-092d3bf5018b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.535006 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ad1b6c3-36a5-4991-b30f-092d3bf5018b" (UID: "3ad1b6c3-36a5-4991-b30f-092d3bf5018b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.549489 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mf8r9\" (UniqueName: \"kubernetes.io/projected/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-kube-api-access-mf8r9\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.549522 4691 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.549536 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.549546 4691 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.549575 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.549584 4691 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.558932 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-config-data" (OuterVolumeSpecName: "config-data") pod "3ad1b6c3-36a5-4991-b30f-092d3bf5018b" (UID: "3ad1b6c3-36a5-4991-b30f-092d3bf5018b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.664619 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ad1b6c3-36a5-4991-b30f-092d3bf5018b-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.720036 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c6d55f9d-af21-413d-8755-dd9af8386c23","Type":"ContainerStarted","Data":"de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373"} Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.725686 4691 generic.go:334] "Generic (PLEG): container finished" podID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerID="6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03" exitCode=0 Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.725861 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ad1b6c3-36a5-4991-b30f-092d3bf5018b","Type":"ContainerDied","Data":"6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03"} Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.726101 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ad1b6c3-36a5-4991-b30f-092d3bf5018b","Type":"ContainerDied","Data":"7c03c469829d19475adf34a4c97b6fd1ebd4b6ff86feb06771d62533c06cd67d"} Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.725958 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.726126 4691 scope.go:117] "RemoveContainer" containerID="7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.748173 4691 generic.go:334] "Generic (PLEG): container finished" podID="a8f1c7da-eb94-4115-baf0-f15d335d85e0" containerID="93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09" exitCode=143 Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.749249 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a8f1c7da-eb94-4115-baf0-f15d335d85e0","Type":"ContainerDied","Data":"93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09"} Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.752915 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=10.038273943 podStartE2EDuration="10.752895743s" podCreationTimestamp="2025-11-24 08:14:08 +0000 UTC" firstStartedPulling="2025-11-24 08:14:15.895248286 +0000 UTC m=+1017.894197535" lastFinishedPulling="2025-11-24 08:14:16.609870086 +0000 UTC m=+1018.608819335" observedRunningTime="2025-11-24 08:14:18.745617822 +0000 UTC m=+1020.744567081" watchObservedRunningTime="2025-11-24 08:14:18.752895743 +0000 UTC m=+1020.751844992" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.792913 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a983467-b9da-4795-8cae-f645d8e316b4" path="/var/lib/kubelet/pods/3a983467-b9da-4795-8cae-f645d8e316b4/volumes" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.803204 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.814289 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.817523 4691 scope.go:117] "RemoveContainer" containerID="d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.831328 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.832330 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a983467-b9da-4795-8cae-f645d8e316b4" containerName="horizon" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832348 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a983467-b9da-4795-8cae-f645d8e316b4" containerName="horizon" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.832361 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="sg-core" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832369 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="sg-core" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.832383 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="ceilometer-notification-agent" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832389 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="ceilometer-notification-agent" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.832396 4691 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="aad353e5-274f-4f13-b270-f8bf589fbc3e" containerName="init" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832402 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="aad353e5-274f-4f13-b270-f8bf589fbc3e" containerName="init" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.832428 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a983467-b9da-4795-8cae-f645d8e316b4" containerName="horizon-log" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832433 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a983467-b9da-4795-8cae-f645d8e316b4" containerName="horizon-log" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.832461 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="proxy-httpd" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832467 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="proxy-httpd" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.832478 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="ceilometer-central-agent" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832485 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="ceilometer-central-agent" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.832496 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aad353e5-274f-4f13-b270-f8bf589fbc3e" containerName="dnsmasq-dns" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832502 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="aad353e5-274f-4f13-b270-f8bf589fbc3e" containerName="dnsmasq-dns" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832676 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="proxy-httpd" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832689 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="sg-core" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832698 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="ceilometer-notification-agent" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832707 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a983467-b9da-4795-8cae-f645d8e316b4" containerName="horizon-log" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832721 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" containerName="ceilometer-central-agent" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832734 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a983467-b9da-4795-8cae-f645d8e316b4" containerName="horizon" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.832745 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="aad353e5-274f-4f13-b270-f8bf589fbc3e" containerName="dnsmasq-dns" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.837905 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.844744 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.844769 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.846204 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.853380 4691 scope.go:117] "RemoveContainer" containerID="6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.890979 4691 scope.go:117] "RemoveContainer" containerID="9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.926958 4691 scope.go:117] "RemoveContainer" containerID="7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.927488 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9\": container with ID starting with 7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9 not found: ID does not exist" containerID="7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.927555 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9"} err="failed to get container status \"7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9\": rpc error: code = NotFound desc = could not find container \"7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9\": container with ID starting with 7bb3ca0a09089bdd3409fb63c8e5197ff1cb24a0adb8ef60d7304b60a2c759b9 not found: ID does not exist" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.927589 4691 scope.go:117] "RemoveContainer" containerID="d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.928029 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a\": container with ID starting with d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a not found: ID does not exist" containerID="d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.928077 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a"} err="failed to get container status \"d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a\": rpc error: code = NotFound desc = could not find container \"d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a\": container with ID starting with d38fd5e7f43b2207b83d5a6320067101a7c4eaae4438986f7b6d5307cbdc017a not found: ID does not exist" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.928107 4691 scope.go:117] "RemoveContainer" containerID="6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03" Nov 24 
08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.928439 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03\": container with ID starting with 6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03 not found: ID does not exist" containerID="6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.928489 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03"} err="failed to get container status \"6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03\": rpc error: code = NotFound desc = could not find container \"6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03\": container with ID starting with 6ac82b7b26a93e0bfa2b4d452ebac20e4b054f53605396bee598e404148ffb03 not found: ID does not exist" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.928509 4691 scope.go:117] "RemoveContainer" containerID="9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b" Nov 24 08:14:18 crc kubenswrapper[4691]: E1124 08:14:18.928854 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b\": container with ID starting with 9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b not found: ID does not exist" containerID="9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.928890 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b"} err="failed to get container status \"9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b\": rpc error: code = NotFound desc = could not find container \"9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b\": container with ID starting with 9d9ab5044ef815a0d40ad37118c7aaa2ab4e0bedf09936d4ea1717456adc8e9b not found: ID does not exist" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.973962 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-config-data\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.974031 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-scripts\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.974071 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlxdm\" (UniqueName: \"kubernetes.io/projected/82e3978a-d699-4b7b-ac29-eee2e5d347f3-kube-api-access-xlxdm\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.974223 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.974316 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-run-httpd\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.974350 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-log-httpd\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:18 crc kubenswrapper[4691]: I1124 08:14:18.974382 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.076319 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-run-httpd\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.076383 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-log-httpd\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.076410 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.076436 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-config-data\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.076470 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-scripts\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.076494 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlxdm\" (UniqueName: \"kubernetes.io/projected/82e3978a-d699-4b7b-ac29-eee2e5d347f3-kube-api-access-xlxdm\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.076570 4691 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.077098 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-run-httpd\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.077444 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-log-httpd\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.082623 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-scripts\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.084645 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.097385 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.097882 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-config-data\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.099013 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlxdm\" (UniqueName: \"kubernetes.io/projected/82e3978a-d699-4b7b-ac29-eee2e5d347f3-kube-api-access-xlxdm\") pod \"ceilometer-0\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.170827 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.204857 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.690022 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:19 crc kubenswrapper[4691]: W1124 08:14:19.699110 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82e3978a_d699_4b7b_ac29_eee2e5d347f3.slice/crio-2989420e91532119a9144e01218b6d19033ee8292ab1d6d30589f6f3fe0edd7e WatchSource:0}: Error finding container 2989420e91532119a9144e01218b6d19033ee8292ab1d6d30589f6f3fe0edd7e: Status 404 returned error can't find the container with id 2989420e91532119a9144e01218b6d19033ee8292ab1d6d30589f6f3fe0edd7e Nov 24 08:14:19 crc kubenswrapper[4691]: I1124 08:14:19.760065 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82e3978a-d699-4b7b-ac29-eee2e5d347f3","Type":"ContainerStarted","Data":"2989420e91532119a9144e01218b6d19033ee8292ab1d6d30589f6f3fe0edd7e"} Nov 24 08:14:20 crc kubenswrapper[4691]: I1124 08:14:20.081824 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-57b84ccfdc-qnsn7" Nov 24 08:14:20 crc kubenswrapper[4691]: I1124 08:14:20.183993 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7fc5776b84-69d6x"] Nov 24 08:14:20 crc kubenswrapper[4691]: I1124 08:14:20.184625 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7fc5776b84-69d6x" podUID="019368b7-2336-4105-a06f-a05ce4cdcc60" containerName="neutron-api" containerID="cri-o://b505c80d0a3e4a651ff0f71ed1fb9e016b5728a74c606f3afe30757f029deec3" gracePeriod=30 Nov 24 08:14:20 crc kubenswrapper[4691]: I1124 08:14:20.184795 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7fc5776b84-69d6x" podUID="019368b7-2336-4105-a06f-a05ce4cdcc60" containerName="neutron-httpd" containerID="cri-o://3f2d31065c69e2013c05bd582aeae10523506355ca75683b23575687e055048a" gracePeriod=30 Nov 24 08:14:20 crc kubenswrapper[4691]: I1124 08:14:20.784518 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ad1b6c3-36a5-4991-b30f-092d3bf5018b" path="/var/lib/kubelet/pods/3ad1b6c3-36a5-4991-b30f-092d3bf5018b/volumes" Nov 24 08:14:20 crc kubenswrapper[4691]: I1124 08:14:20.789161 4691 generic.go:334] "Generic (PLEG): container finished" podID="019368b7-2336-4105-a06f-a05ce4cdcc60" containerID="3f2d31065c69e2013c05bd582aeae10523506355ca75683b23575687e055048a" exitCode=0 Nov 24 08:14:20 crc kubenswrapper[4691]: I1124 08:14:20.789211 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7fc5776b84-69d6x" event={"ID":"019368b7-2336-4105-a06f-a05ce4cdcc60","Type":"ContainerDied","Data":"3f2d31065c69e2013c05bd582aeae10523506355ca75683b23575687e055048a"} Nov 24 08:14:20 crc kubenswrapper[4691]: I1124 08:14:20.792641 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82e3978a-d699-4b7b-ac29-eee2e5d347f3","Type":"ContainerStarted","Data":"757732dab03be290ae443b94ad2196894fd2099add81bbd1ad7277bd3a539224"} Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.172827 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-66c5785d58-5hgjb" 
podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.165:9311/healthcheck\": read tcp 10.217.0.2:36168->10.217.0.165:9311: read: connection reset by peer" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.172867 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-66c5785d58-5hgjb" podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.165:9311/healthcheck\": read tcp 10.217.0.2:36166->10.217.0.165:9311: read: connection reset by peer" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.605486 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.742008 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffa5629b-a725-48dd-a0ed-7b5d3b481189-logs\") pod \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.742183 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-combined-ca-bundle\") pod \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.742441 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvhx6\" (UniqueName: \"kubernetes.io/projected/ffa5629b-a725-48dd-a0ed-7b5d3b481189-kube-api-access-pvhx6\") pod \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.742535 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data\") pod \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.742620 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data-custom\") pod \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\" (UID: \"ffa5629b-a725-48dd-a0ed-7b5d3b481189\") " Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.743336 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffa5629b-a725-48dd-a0ed-7b5d3b481189-logs" (OuterVolumeSpecName: "logs") pod "ffa5629b-a725-48dd-a0ed-7b5d3b481189" (UID: "ffa5629b-a725-48dd-a0ed-7b5d3b481189"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.752796 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ffa5629b-a725-48dd-a0ed-7b5d3b481189" (UID: "ffa5629b-a725-48dd-a0ed-7b5d3b481189"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.753052 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffa5629b-a725-48dd-a0ed-7b5d3b481189-kube-api-access-pvhx6" (OuterVolumeSpecName: "kube-api-access-pvhx6") pod "ffa5629b-a725-48dd-a0ed-7b5d3b481189" (UID: "ffa5629b-a725-48dd-a0ed-7b5d3b481189"). InnerVolumeSpecName "kube-api-access-pvhx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.779800 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffa5629b-a725-48dd-a0ed-7b5d3b481189" (UID: "ffa5629b-a725-48dd-a0ed-7b5d3b481189"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.804336 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82e3978a-d699-4b7b-ac29-eee2e5d347f3","Type":"ContainerStarted","Data":"6eae479600c567b7a7c67e54a0c0f5726535f14c0321b2309b5c75726c7752ce"} Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.805871 4691 generic.go:334] "Generic (PLEG): container finished" podID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerID="f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886" exitCode=0 Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.805898 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66c5785d58-5hgjb" event={"ID":"ffa5629b-a725-48dd-a0ed-7b5d3b481189","Type":"ContainerDied","Data":"f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886"} Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.805916 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66c5785d58-5hgjb" event={"ID":"ffa5629b-a725-48dd-a0ed-7b5d3b481189","Type":"ContainerDied","Data":"eb08d1a76a3eda79286f5a0fb0ba0b33527decf4f7ce01bffdb9b6e5a667eb31"} Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.805934 4691 scope.go:117] "RemoveContainer" containerID="f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.806070 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-66c5785d58-5hgjb" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.835703 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data" (OuterVolumeSpecName: "config-data") pod "ffa5629b-a725-48dd-a0ed-7b5d3b481189" (UID: "ffa5629b-a725-48dd-a0ed-7b5d3b481189"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.835768 4691 scope.go:117] "RemoveContainer" containerID="a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.844854 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvhx6\" (UniqueName: \"kubernetes.io/projected/ffa5629b-a725-48dd-a0ed-7b5d3b481189-kube-api-access-pvhx6\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.844893 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.844908 4691 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.844924 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ffa5629b-a725-48dd-a0ed-7b5d3b481189-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.844936 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffa5629b-a725-48dd-a0ed-7b5d3b481189-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.866920 4691 scope.go:117] "RemoveContainer" containerID="f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886" Nov 24 08:14:21 crc kubenswrapper[4691]: E1124 08:14:21.867422 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886\": container with ID starting with f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886 not found: ID does not exist" containerID="f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.867480 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886"} err="failed to get container status \"f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886\": rpc error: code = NotFound desc = could not find container \"f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886\": container with ID starting with f2b2184bf03e574e68fac0c44246a3070995e4c343ddb737b3e4048bacc9f886 not found: ID does not exist" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.867508 4691 scope.go:117] "RemoveContainer" containerID="a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b" Nov 24 08:14:21 crc kubenswrapper[4691]: E1124 08:14:21.868369 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b\": container with ID starting with a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b not found: ID does not exist" containerID="a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b" Nov 24 08:14:21 crc kubenswrapper[4691]: I1124 08:14:21.868395 4691 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b"} err="failed to get container status \"a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b\": rpc error: code = NotFound desc = could not find container \"a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b\": container with ID starting with a3b31d0eb3cef859bf8410a0395cb07ab214ebf4e571831865ee4c57eb56894b not found: ID does not exist" Nov 24 08:14:22 crc kubenswrapper[4691]: I1124 08:14:22.137589 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-66c5785d58-5hgjb"] Nov 24 08:14:22 crc kubenswrapper[4691]: I1124 08:14:22.151137 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-66c5785d58-5hgjb"] Nov 24 08:14:22 crc kubenswrapper[4691]: I1124 08:14:22.774614 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" path="/var/lib/kubelet/pods/ffa5629b-a725-48dd-a0ed-7b5d3b481189/volumes" Nov 24 08:14:22 crc kubenswrapper[4691]: I1124 08:14:22.817957 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82e3978a-d699-4b7b-ac29-eee2e5d347f3","Type":"ContainerStarted","Data":"884f0abee83fda280a286fe75c7b6a3a0a033b6ac9d1395088868097dfa2ae4f"} Nov 24 08:14:23 crc kubenswrapper[4691]: I1124 08:14:23.833329 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82e3978a-d699-4b7b-ac29-eee2e5d347f3","Type":"ContainerStarted","Data":"67eb28b13cd17146ee41621e31b17128ce5fb15e3d22a2893fa8fc7cc3656296"} Nov 24 08:14:23 crc kubenswrapper[4691]: I1124 08:14:23.833627 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 08:14:23 crc kubenswrapper[4691]: I1124 08:14:23.878176 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.233030749 podStartE2EDuration="5.878151738s" podCreationTimestamp="2025-11-24 08:14:18 +0000 UTC" firstStartedPulling="2025-11-24 08:14:19.702343079 +0000 UTC m=+1021.701292328" lastFinishedPulling="2025-11-24 08:14:23.347464068 +0000 UTC m=+1025.346413317" observedRunningTime="2025-11-24 08:14:23.873409711 +0000 UTC m=+1025.872359050" watchObservedRunningTime="2025-11-24 08:14:23.878151738 +0000 UTC m=+1025.877100997" Nov 24 08:14:24 crc kubenswrapper[4691]: I1124 08:14:24.465209 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 24 08:14:24 crc kubenswrapper[4691]: I1124 08:14:24.496617 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:14:24 crc kubenswrapper[4691]: I1124 08:14:24.525436 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 08:14:24 crc kubenswrapper[4691]: I1124 08:14:24.568225 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-bqjww"] Nov 24 08:14:24 crc kubenswrapper[4691]: I1124 08:14:24.568644 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7b667979-bqjww" podUID="060626b1-822b-4c3a-a1b0-a1c14fb04c18" containerName="dnsmasq-dns" containerID="cri-o://0c2904c396c77b022c04b24128f89ae250630983439281e5cef40acfb847709b" gracePeriod=10 Nov 24 08:14:24 crc kubenswrapper[4691]: I1124 08:14:24.851352 4691 
generic.go:334] "Generic (PLEG): container finished" podID="060626b1-822b-4c3a-a1b0-a1c14fb04c18" containerID="0c2904c396c77b022c04b24128f89ae250630983439281e5cef40acfb847709b" exitCode=0 Nov 24 08:14:24 crc kubenswrapper[4691]: I1124 08:14:24.851531 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-bqjww" event={"ID":"060626b1-822b-4c3a-a1b0-a1c14fb04c18","Type":"ContainerDied","Data":"0c2904c396c77b022c04b24128f89ae250630983439281e5cef40acfb847709b"} Nov 24 08:14:24 crc kubenswrapper[4691]: I1124 08:14:24.852430 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c6d55f9d-af21-413d-8755-dd9af8386c23" containerName="cinder-scheduler" containerID="cri-o://3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387" gracePeriod=30 Nov 24 08:14:24 crc kubenswrapper[4691]: I1124 08:14:24.852761 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c6d55f9d-af21-413d-8755-dd9af8386c23" containerName="probe" containerID="cri-o://de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373" gracePeriod=30 Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.149415 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-bqjww" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.327290 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbds4\" (UniqueName: \"kubernetes.io/projected/060626b1-822b-4c3a-a1b0-a1c14fb04c18-kube-api-access-vbds4\") pod \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.327617 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-config\") pod \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.327681 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-swift-storage-0\") pod \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.327715 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-nb\") pod \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.327760 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-sb\") pod \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.327792 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-svc\") pod \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\" (UID: \"060626b1-822b-4c3a-a1b0-a1c14fb04c18\") " Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 
08:14:25.335750 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/060626b1-822b-4c3a-a1b0-a1c14fb04c18-kube-api-access-vbds4" (OuterVolumeSpecName: "kube-api-access-vbds4") pod "060626b1-822b-4c3a-a1b0-a1c14fb04c18" (UID: "060626b1-822b-4c3a-a1b0-a1c14fb04c18"). InnerVolumeSpecName "kube-api-access-vbds4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.391559 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "060626b1-822b-4c3a-a1b0-a1c14fb04c18" (UID: "060626b1-822b-4c3a-a1b0-a1c14fb04c18"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.399226 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-config" (OuterVolumeSpecName: "config") pod "060626b1-822b-4c3a-a1b0-a1c14fb04c18" (UID: "060626b1-822b-4c3a-a1b0-a1c14fb04c18"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.399657 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "060626b1-822b-4c3a-a1b0-a1c14fb04c18" (UID: "060626b1-822b-4c3a-a1b0-a1c14fb04c18"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.432608 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "060626b1-822b-4c3a-a1b0-a1c14fb04c18" (UID: "060626b1-822b-4c3a-a1b0-a1c14fb04c18"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.434082 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "060626b1-822b-4c3a-a1b0-a1c14fb04c18" (UID: "060626b1-822b-4c3a-a1b0-a1c14fb04c18"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.434495 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbds4\" (UniqueName: \"kubernetes.io/projected/060626b1-822b-4c3a-a1b0-a1c14fb04c18-kube-api-access-vbds4\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.434526 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.434541 4691 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.434557 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.434571 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.536818 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/060626b1-822b-4c3a-a1b0-a1c14fb04c18-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.864110 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-bqjww" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.864110 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-bqjww" event={"ID":"060626b1-822b-4c3a-a1b0-a1c14fb04c18","Type":"ContainerDied","Data":"1fa3f21881b2f293c8796d1671904b83c896f93a25149f2080ac71f7808563f2"} Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.864187 4691 scope.go:117] "RemoveContainer" containerID="0c2904c396c77b022c04b24128f89ae250630983439281e5cef40acfb847709b" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.869349 4691 generic.go:334] "Generic (PLEG): container finished" podID="c6d55f9d-af21-413d-8755-dd9af8386c23" containerID="de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373" exitCode=0 Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.869397 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c6d55f9d-af21-413d-8755-dd9af8386c23","Type":"ContainerDied","Data":"de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373"} Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.904517 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-bqjww"] Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.910659 4691 scope.go:117] "RemoveContainer" containerID="13553884387f303980046b6fe32b69600377b3f3706c43be7e7564530165cb20" Nov 24 08:14:25 crc kubenswrapper[4691]: I1124 08:14:25.912268 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-bqjww"] Nov 24 08:14:26 crc kubenswrapper[4691]: I1124 08:14:26.379664 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:14:26 crc kubenswrapper[4691]: I1124 08:14:26.439508 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:14:26 crc kubenswrapper[4691]: I1124 08:14:26.775061 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="060626b1-822b-4c3a-a1b0-a1c14fb04c18" path="/var/lib/kubelet/pods/060626b1-822b-4c3a-a1b0-a1c14fb04c18/volumes" Nov 24 08:14:27 crc kubenswrapper[4691]: I1124 08:14:27.104277 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 24 08:14:28 crc kubenswrapper[4691]: I1124 08:14:28.349068 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:14:28 crc kubenswrapper[4691]: I1124 08:14:28.527982 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-5fb4677cdd-69rb6" Nov 24 08:14:28 crc kubenswrapper[4691]: I1124 08:14:28.604908 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-77477f4d7b-kclfz"] Nov 24 08:14:28 crc kubenswrapper[4691]: I1124 08:14:28.850442 4691 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podfaf5645f-a25c-4bde-9769-51e1681b7eba"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podfaf5645f-a25c-4bde-9769-51e1681b7eba] : Timed out while waiting for systemd to remove kubepods-besteffort-podfaf5645f_a25c_4bde_9769_51e1681b7eba.slice" Nov 24 08:14:28 crc kubenswrapper[4691]: I1124 08:14:28.908809 4691 generic.go:334] "Generic (PLEG): container finished" podID="019368b7-2336-4105-a06f-a05ce4cdcc60" containerID="b505c80d0a3e4a651ff0f71ed1fb9e016b5728a74c606f3afe30757f029deec3" exitCode=0 Nov 24 08:14:28 crc kubenswrapper[4691]: I1124 08:14:28.908887 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7fc5776b84-69d6x" event={"ID":"019368b7-2336-4105-a06f-a05ce4cdcc60","Type":"ContainerDied","Data":"b505c80d0a3e4a651ff0f71ed1fb9e016b5728a74c606f3afe30757f029deec3"} Nov 24 08:14:28 crc kubenswrapper[4691]: I1124 08:14:28.909072 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-77477f4d7b-kclfz" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon-log" containerID="cri-o://3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c" gracePeriod=30 Nov 24 08:14:28 crc kubenswrapper[4691]: I1124 08:14:28.909114 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-77477f4d7b-kclfz" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon" containerID="cri-o://6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824" gracePeriod=30 Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.351229 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7fc5776b84-69d6x" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.474150 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.518658 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrrbs\" (UniqueName: \"kubernetes.io/projected/019368b7-2336-4105-a06f-a05ce4cdcc60-kube-api-access-nrrbs\") pod \"019368b7-2336-4105-a06f-a05ce4cdcc60\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.518992 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-ovndb-tls-certs\") pod \"019368b7-2336-4105-a06f-a05ce4cdcc60\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.519614 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-httpd-config\") pod \"019368b7-2336-4105-a06f-a05ce4cdcc60\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.519713 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-combined-ca-bundle\") pod \"019368b7-2336-4105-a06f-a05ce4cdcc60\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.519888 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-config\") pod \"019368b7-2336-4105-a06f-a05ce4cdcc60\" (UID: \"019368b7-2336-4105-a06f-a05ce4cdcc60\") " Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.537635 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "019368b7-2336-4105-a06f-a05ce4cdcc60" (UID: "019368b7-2336-4105-a06f-a05ce4cdcc60"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.539987 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/019368b7-2336-4105-a06f-a05ce4cdcc60-kube-api-access-nrrbs" (OuterVolumeSpecName: "kube-api-access-nrrbs") pod "019368b7-2336-4105-a06f-a05ce4cdcc60" (UID: "019368b7-2336-4105-a06f-a05ce4cdcc60"). InnerVolumeSpecName "kube-api-access-nrrbs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.574324 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-config" (OuterVolumeSpecName: "config") pod "019368b7-2336-4105-a06f-a05ce4cdcc60" (UID: "019368b7-2336-4105-a06f-a05ce4cdcc60"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.592321 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "019368b7-2336-4105-a06f-a05ce4cdcc60" (UID: "019368b7-2336-4105-a06f-a05ce4cdcc60"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.623117 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-scripts\") pod \"c6d55f9d-af21-413d-8755-dd9af8386c23\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.623207 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data\") pod \"c6d55f9d-af21-413d-8755-dd9af8386c23\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.623247 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-combined-ca-bundle\") pod \"c6d55f9d-af21-413d-8755-dd9af8386c23\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.623846 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9dxm\" (UniqueName: \"kubernetes.io/projected/c6d55f9d-af21-413d-8755-dd9af8386c23-kube-api-access-z9dxm\") pod \"c6d55f9d-af21-413d-8755-dd9af8386c23\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.623945 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data-custom\") pod \"c6d55f9d-af21-413d-8755-dd9af8386c23\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.624030 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c6d55f9d-af21-413d-8755-dd9af8386c23-etc-machine-id\") pod \"c6d55f9d-af21-413d-8755-dd9af8386c23\" (UID: \"c6d55f9d-af21-413d-8755-dd9af8386c23\") " Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.624530 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrrbs\" (UniqueName: \"kubernetes.io/projected/019368b7-2336-4105-a06f-a05ce4cdcc60-kube-api-access-nrrbs\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.624551 4691 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.624564 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.624575 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.624631 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c6d55f9d-af21-413d-8755-dd9af8386c23-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c6d55f9d-af21-413d-8755-dd9af8386c23" (UID: 
"c6d55f9d-af21-413d-8755-dd9af8386c23"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.627425 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6d55f9d-af21-413d-8755-dd9af8386c23-kube-api-access-z9dxm" (OuterVolumeSpecName: "kube-api-access-z9dxm") pod "c6d55f9d-af21-413d-8755-dd9af8386c23" (UID: "c6d55f9d-af21-413d-8755-dd9af8386c23"). InnerVolumeSpecName "kube-api-access-z9dxm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.630602 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c6d55f9d-af21-413d-8755-dd9af8386c23" (UID: "c6d55f9d-af21-413d-8755-dd9af8386c23"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.633008 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-scripts" (OuterVolumeSpecName: "scripts") pod "c6d55f9d-af21-413d-8755-dd9af8386c23" (UID: "c6d55f9d-af21-413d-8755-dd9af8386c23"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.637315 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "019368b7-2336-4105-a06f-a05ce4cdcc60" (UID: "019368b7-2336-4105-a06f-a05ce4cdcc60"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.677160 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6d55f9d-af21-413d-8755-dd9af8386c23" (UID: "c6d55f9d-af21-413d-8755-dd9af8386c23"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.720137 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data" (OuterVolumeSpecName: "config-data") pod "c6d55f9d-af21-413d-8755-dd9af8386c23" (UID: "c6d55f9d-af21-413d-8755-dd9af8386c23"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.726292 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9dxm\" (UniqueName: \"kubernetes.io/projected/c6d55f9d-af21-413d-8755-dd9af8386c23-kube-api-access-z9dxm\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.726316 4691 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.726326 4691 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c6d55f9d-af21-413d-8755-dd9af8386c23-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.726334 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.726342 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.726350 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6d55f9d-af21-413d-8755-dd9af8386c23-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.726358 4691 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/019368b7-2336-4105-a06f-a05ce4cdcc60-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.920413 4691 generic.go:334] "Generic (PLEG): container finished" podID="c6d55f9d-af21-413d-8755-dd9af8386c23" containerID="3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387" exitCode=0 Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.920573 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.923608 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c6d55f9d-af21-413d-8755-dd9af8386c23","Type":"ContainerDied","Data":"3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387"} Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.923666 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c6d55f9d-af21-413d-8755-dd9af8386c23","Type":"ContainerDied","Data":"4eda5274b48f3e1f0baed00f4dda0b97fac99ff9d650ba013a4316ee3f5a9059"} Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.923693 4691 scope.go:117] "RemoveContainer" containerID="de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.927055 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7fc5776b84-69d6x" event={"ID":"019368b7-2336-4105-a06f-a05ce4cdcc60","Type":"ContainerDied","Data":"57d1521d7f954c2a5ff240c7513699d5818fd0c459b6f598d6247f11fadf907b"} Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.927162 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7fc5776b84-69d6x" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.969902 4691 scope.go:117] "RemoveContainer" containerID="3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387" Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.987929 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 08:14:29 crc kubenswrapper[4691]: I1124 08:14:29.993022 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.000956 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7fc5776b84-69d6x"] Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.009160 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7fc5776b84-69d6x"] Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.015550 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 08:14:30 crc kubenswrapper[4691]: E1124 08:14:30.016125 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6d55f9d-af21-413d-8755-dd9af8386c23" containerName="probe" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.016144 4691 scope.go:117] "RemoveContainer" containerID="de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.016157 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6d55f9d-af21-413d-8755-dd9af8386c23" containerName="probe" Nov 24 08:14:30 crc kubenswrapper[4691]: E1124 08:14:30.016548 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="060626b1-822b-4c3a-a1b0-a1c14fb04c18" containerName="dnsmasq-dns" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.016572 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="060626b1-822b-4c3a-a1b0-a1c14fb04c18" containerName="dnsmasq-dns" Nov 24 08:14:30 crc kubenswrapper[4691]: E1124 08:14:30.016592 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="060626b1-822b-4c3a-a1b0-a1c14fb04c18" containerName="init" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.016603 4691 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="060626b1-822b-4c3a-a1b0-a1c14fb04c18" containerName="init" Nov 24 08:14:30 crc kubenswrapper[4691]: E1124 08:14:30.016622 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerName="barbican-api-log" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.016635 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerName="barbican-api-log" Nov 24 08:14:30 crc kubenswrapper[4691]: E1124 08:14:30.016681 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6d55f9d-af21-413d-8755-dd9af8386c23" containerName="cinder-scheduler" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.016693 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6d55f9d-af21-413d-8755-dd9af8386c23" containerName="cinder-scheduler" Nov 24 08:14:30 crc kubenswrapper[4691]: E1124 08:14:30.016734 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="019368b7-2336-4105-a06f-a05ce4cdcc60" containerName="neutron-httpd" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.016746 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="019368b7-2336-4105-a06f-a05ce4cdcc60" containerName="neutron-httpd" Nov 24 08:14:30 crc kubenswrapper[4691]: E1124 08:14:30.016793 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerName="barbican-api" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.016804 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerName="barbican-api" Nov 24 08:14:30 crc kubenswrapper[4691]: E1124 08:14:30.016824 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="019368b7-2336-4105-a06f-a05ce4cdcc60" containerName="neutron-api" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.016835 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="019368b7-2336-4105-a06f-a05ce4cdcc60" containerName="neutron-api" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.017385 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6d55f9d-af21-413d-8755-dd9af8386c23" containerName="probe" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.017408 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6d55f9d-af21-413d-8755-dd9af8386c23" containerName="cinder-scheduler" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.017430 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerName="barbican-api-log" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.017468 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="019368b7-2336-4105-a06f-a05ce4cdcc60" containerName="neutron-api" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.017489 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffa5629b-a725-48dd-a0ed-7b5d3b481189" containerName="barbican-api" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.017511 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="060626b1-822b-4c3a-a1b0-a1c14fb04c18" containerName="dnsmasq-dns" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.017538 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="019368b7-2336-4105-a06f-a05ce4cdcc60" containerName="neutron-httpd" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.019389 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: E1124 08:14:30.019908 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373\": container with ID starting with de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373 not found: ID does not exist" containerID="de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.019937 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373"} err="failed to get container status \"de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373\": rpc error: code = NotFound desc = could not find container \"de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373\": container with ID starting with de811a427b57078cdd2a8b1504714a300429ddf0d4584444a51c64a900d87373 not found: ID does not exist" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.019961 4691 scope.go:117] "RemoveContainer" containerID="3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387" Nov 24 08:14:30 crc kubenswrapper[4691]: E1124 08:14:30.020177 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387\": container with ID starting with 3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387 not found: ID does not exist" containerID="3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.020194 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387"} err="failed to get container status \"3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387\": rpc error: code = NotFound desc = could not find container \"3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387\": container with ID starting with 3733862e27080b45b034444e405ae715bb8441a1f7771d14dc567b1087bcd387 not found: ID does not exist" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.020367 4691 scope.go:117] "RemoveContainer" containerID="3f2d31065c69e2013c05bd582aeae10523506355ca75683b23575687e055048a" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.025383 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.040719 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.055382 4691 scope.go:117] "RemoveContainer" containerID="b505c80d0a3e4a651ff0f71ed1fb9e016b5728a74c606f3afe30757f029deec3" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.105589 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-54fc9d9c65-98hdh" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.133422 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hlgh\" (UniqueName: \"kubernetes.io/projected/68176dd8-7480-4c30-8788-dd915e1568d5-kube-api-access-2hlgh\") pod \"cinder-scheduler-0\" (UID: 
\"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.133663 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-scripts\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.133706 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.133762 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/68176dd8-7480-4c30-8788-dd915e1568d5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.133987 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.134034 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-config-data\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.235922 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.235993 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-config-data\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.236027 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hlgh\" (UniqueName: \"kubernetes.io/projected/68176dd8-7480-4c30-8788-dd915e1568d5-kube-api-access-2hlgh\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.236123 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-scripts\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.236155 4691 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.236203 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/68176dd8-7480-4c30-8788-dd915e1568d5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.241519 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/68176dd8-7480-4c30-8788-dd915e1568d5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.241806 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-scripts\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.242540 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.251704 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.253565 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68176dd8-7480-4c30-8788-dd915e1568d5-config-data\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.253968 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hlgh\" (UniqueName: \"kubernetes.io/projected/68176dd8-7480-4c30-8788-dd915e1568d5-kube-api-access-2hlgh\") pod \"cinder-scheduler-0\" (UID: \"68176dd8-7480-4c30-8788-dd915e1568d5\") " pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.352752 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.772265 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="019368b7-2336-4105-a06f-a05ce4cdcc60" path="/var/lib/kubelet/pods/019368b7-2336-4105-a06f-a05ce4cdcc60/volumes" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.773526 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6d55f9d-af21-413d-8755-dd9af8386c23" path="/var/lib/kubelet/pods/c6d55f9d-af21-413d-8755-dd9af8386c23/volumes" Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.854938 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 24 08:14:30 crc kubenswrapper[4691]: I1124 08:14:30.958144 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"68176dd8-7480-4c30-8788-dd915e1568d5","Type":"ContainerStarted","Data":"f861ce48caedfed9ac960a57ab265da89332f438bdacff098f82f471e1becb9f"} Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.220438 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.222133 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.224312 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-lj9zv" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.226796 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.227192 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.236217 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.365333 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.365537 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config-secret\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.365986 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.366163 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwv6t\" (UniqueName: \"kubernetes.io/projected/5b0f8f64-977e-4952-81cb-7627c6b29b2d-kube-api-access-bwv6t\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " 
pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.397375 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-77fc7f8568-9mx5z" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.455950 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-77fc7f8568-9mx5z" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.469076 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.469155 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config-secret\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.469204 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.469243 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwv6t\" (UniqueName: \"kubernetes.io/projected/5b0f8f64-977e-4952-81cb-7627c6b29b2d-kube-api-access-bwv6t\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.470297 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.481408 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.498909 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 24 08:14:31 crc kubenswrapper[4691]: E1124 08:14:31.499440 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-bwv6t openstack-config-secret], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="5b0f8f64-977e-4952-81cb-7627c6b29b2d" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.501988 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config-secret\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.510354 4691 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwv6t\" (UniqueName: \"kubernetes.io/projected/5b0f8f64-977e-4952-81cb-7627c6b29b2d-kube-api-access-bwv6t\") pod \"openstackclient\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.528618 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.569733 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.571374 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.581268 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.679552 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/86907013-52ae-4aeb-a697-6066cfdbebde-openstack-config\") pod \"openstackclient\" (UID: \"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.679610 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w92zg\" (UniqueName: \"kubernetes.io/projected/86907013-52ae-4aeb-a697-6066cfdbebde-kube-api-access-w92zg\") pod \"openstackclient\" (UID: \"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.679713 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/86907013-52ae-4aeb-a697-6066cfdbebde-openstack-config-secret\") pod \"openstackclient\" (UID: \"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.679745 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86907013-52ae-4aeb-a697-6066cfdbebde-combined-ca-bundle\") pod \"openstackclient\" (UID: \"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.782039 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/86907013-52ae-4aeb-a697-6066cfdbebde-openstack-config-secret\") pod \"openstackclient\" (UID: \"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.782094 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86907013-52ae-4aeb-a697-6066cfdbebde-combined-ca-bundle\") pod \"openstackclient\" (UID: \"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.782212 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/86907013-52ae-4aeb-a697-6066cfdbebde-openstack-config\") pod \"openstackclient\" (UID: 
\"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.782238 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w92zg\" (UniqueName: \"kubernetes.io/projected/86907013-52ae-4aeb-a697-6066cfdbebde-kube-api-access-w92zg\") pod \"openstackclient\" (UID: \"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.784097 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/86907013-52ae-4aeb-a697-6066cfdbebde-openstack-config\") pod \"openstackclient\" (UID: \"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.788554 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86907013-52ae-4aeb-a697-6066cfdbebde-combined-ca-bundle\") pod \"openstackclient\" (UID: \"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.793078 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/86907013-52ae-4aeb-a697-6066cfdbebde-openstack-config-secret\") pod \"openstackclient\" (UID: \"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.806149 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w92zg\" (UniqueName: \"kubernetes.io/projected/86907013-52ae-4aeb-a697-6066cfdbebde-kube-api-access-w92zg\") pod \"openstackclient\" (UID: \"86907013-52ae-4aeb-a697-6066cfdbebde\") " pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.902892 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.975974 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"68176dd8-7480-4c30-8788-dd915e1568d5","Type":"ContainerStarted","Data":"4031c111af6edc9c9223a4738c68bc6eee8db9dcf2bbe623afb4105657f78838"} Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.976304 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 24 08:14:31 crc kubenswrapper[4691]: I1124 08:14:31.983131 4691 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="5b0f8f64-977e-4952-81cb-7627c6b29b2d" podUID="86907013-52ae-4aeb-a697-6066cfdbebde" Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.066023 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.208609 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config-secret\") pod \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.209141 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwv6t\" (UniqueName: \"kubernetes.io/projected/5b0f8f64-977e-4952-81cb-7627c6b29b2d-kube-api-access-bwv6t\") pod \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.209241 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config\") pod \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.209300 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-combined-ca-bundle\") pod \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\" (UID: \"5b0f8f64-977e-4952-81cb-7627c6b29b2d\") " Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.211919 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "5b0f8f64-977e-4952-81cb-7627c6b29b2d" (UID: "5b0f8f64-977e-4952-81cb-7627c6b29b2d"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.216224 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "5b0f8f64-977e-4952-81cb-7627c6b29b2d" (UID: "5b0f8f64-977e-4952-81cb-7627c6b29b2d"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.217533 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b0f8f64-977e-4952-81cb-7627c6b29b2d" (UID: "5b0f8f64-977e-4952-81cb-7627c6b29b2d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.233473 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b0f8f64-977e-4952-81cb-7627c6b29b2d-kube-api-access-bwv6t" (OuterVolumeSpecName: "kube-api-access-bwv6t") pod "5b0f8f64-977e-4952-81cb-7627c6b29b2d" (UID: "5b0f8f64-977e-4952-81cb-7627c6b29b2d"). InnerVolumeSpecName "kube-api-access-bwv6t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.311890 4691 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.311932 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.311946 4691 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/5b0f8f64-977e-4952-81cb-7627c6b29b2d-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.311959 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwv6t\" (UniqueName: \"kubernetes.io/projected/5b0f8f64-977e-4952-81cb-7627c6b29b2d-kube-api-access-bwv6t\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.434046 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 24 08:14:32 crc kubenswrapper[4691]: W1124 08:14:32.444608 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86907013_52ae_4aeb_a697_6066cfdbebde.slice/crio-48f06a9b8ad20eee98cbf9d99a2d3b48521498bf40a13db5afaba7bf8dee1b61 WatchSource:0}: Error finding container 48f06a9b8ad20eee98cbf9d99a2d3b48521498bf40a13db5afaba7bf8dee1b61: Status 404 returned error can't find the container with id 48f06a9b8ad20eee98cbf9d99a2d3b48521498bf40a13db5afaba7bf8dee1b61 Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.771485 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b0f8f64-977e-4952-81cb-7627c6b29b2d" path="/var/lib/kubelet/pods/5b0f8f64-977e-4952-81cb-7627c6b29b2d/volumes" Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.987332 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"86907013-52ae-4aeb-a697-6066cfdbebde","Type":"ContainerStarted","Data":"48f06a9b8ad20eee98cbf9d99a2d3b48521498bf40a13db5afaba7bf8dee1b61"} Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.990151 4691 generic.go:334] "Generic (PLEG): container finished" podID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerID="6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824" exitCode=0 Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.990187 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77477f4d7b-kclfz" event={"ID":"567ed4cd-aaf3-4e52-be70-2f723075d545","Type":"ContainerDied","Data":"6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824"} Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.992331 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"68176dd8-7480-4c30-8788-dd915e1568d5","Type":"ContainerStarted","Data":"34d0fd09db1481d9dee6c0ae9075c122d06dd56c009a038a8f5cc6c1d9eb29dd"} Nov 24 08:14:32 crc kubenswrapper[4691]: I1124 08:14:32.992383 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 24 08:14:33 crc kubenswrapper[4691]: I1124 08:14:33.017692 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.01767475 podStartE2EDuration="4.01767475s" podCreationTimestamp="2025-11-24 08:14:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:33.017494565 +0000 UTC m=+1035.016443814" watchObservedRunningTime="2025-11-24 08:14:33.01767475 +0000 UTC m=+1035.016623999" Nov 24 08:14:33 crc kubenswrapper[4691]: I1124 08:14:33.020683 4691 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="5b0f8f64-977e-4952-81cb-7627c6b29b2d" podUID="86907013-52ae-4aeb-a697-6066cfdbebde" Nov 24 08:14:34 crc kubenswrapper[4691]: I1124 08:14:34.097587 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-77477f4d7b-kclfz" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Nov 24 08:14:35 crc kubenswrapper[4691]: I1124 08:14:35.355878 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.023355 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6bf54cf5bc-7wgwz"] Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.033592 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.041349 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.041401 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.044705 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.096862 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6bf54cf5bc-7wgwz"] Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.192888 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-combined-ca-bundle\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.192948 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a9213e2-4a1f-4d15-ab02-472c467babfe-run-httpd\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.193000 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9a9213e2-4a1f-4d15-ab02-472c467babfe-etc-swift\") pod 
\"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.193023 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-config-data\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.193067 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-public-tls-certs\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.193101 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a9213e2-4a1f-4d15-ab02-472c467babfe-log-httpd\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.193321 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-internal-tls-certs\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.193386 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vhmp\" (UniqueName: \"kubernetes.io/projected/9a9213e2-4a1f-4d15-ab02-472c467babfe-kube-api-access-4vhmp\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.294873 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vhmp\" (UniqueName: \"kubernetes.io/projected/9a9213e2-4a1f-4d15-ab02-472c467babfe-kube-api-access-4vhmp\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.294990 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-combined-ca-bundle\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.295017 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a9213e2-4a1f-4d15-ab02-472c467babfe-run-httpd\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.295056 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/projected/9a9213e2-4a1f-4d15-ab02-472c467babfe-etc-swift\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.295074 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-config-data\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.295116 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-public-tls-certs\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.295147 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a9213e2-4a1f-4d15-ab02-472c467babfe-log-httpd\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.295174 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-internal-tls-certs\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.296870 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a9213e2-4a1f-4d15-ab02-472c467babfe-run-httpd\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.299847 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a9213e2-4a1f-4d15-ab02-472c467babfe-log-httpd\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.302084 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9a9213e2-4a1f-4d15-ab02-472c467babfe-etc-swift\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.302841 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-internal-tls-certs\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.303336 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-public-tls-certs\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: 
\"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.303382 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-combined-ca-bundle\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.305224 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a9213e2-4a1f-4d15-ab02-472c467babfe-config-data\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.313830 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vhmp\" (UniqueName: \"kubernetes.io/projected/9a9213e2-4a1f-4d15-ab02-472c467babfe-kube-api-access-4vhmp\") pod \"swift-proxy-6bf54cf5bc-7wgwz\" (UID: \"9a9213e2-4a1f-4d15-ab02-472c467babfe\") " pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.376729 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.456562 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.457786 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="ceilometer-central-agent" containerID="cri-o://757732dab03be290ae443b94ad2196894fd2099add81bbd1ad7277bd3a539224" gracePeriod=30 Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.459005 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="proxy-httpd" containerID="cri-o://67eb28b13cd17146ee41621e31b17128ce5fb15e3d22a2893fa8fc7cc3656296" gracePeriod=30 Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.459101 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="sg-core" containerID="cri-o://884f0abee83fda280a286fe75c7b6a3a0a033b6ac9d1395088868097dfa2ae4f" gracePeriod=30 Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.459201 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="ceilometer-notification-agent" containerID="cri-o://6eae479600c567b7a7c67e54a0c0f5726535f14c0321b2309b5c75726c7752ce" gracePeriod=30 Nov 24 08:14:36 crc kubenswrapper[4691]: I1124 08:14:36.490902 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.170:3000/\": EOF" Nov 24 08:14:37 crc kubenswrapper[4691]: I1124 08:14:37.057708 4691 generic.go:334] "Generic (PLEG): container finished" podID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerID="67eb28b13cd17146ee41621e31b17128ce5fb15e3d22a2893fa8fc7cc3656296" exitCode=0 Nov 24 08:14:37 crc 
kubenswrapper[4691]: I1124 08:14:37.057756 4691 generic.go:334] "Generic (PLEG): container finished" podID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerID="884f0abee83fda280a286fe75c7b6a3a0a033b6ac9d1395088868097dfa2ae4f" exitCode=2 Nov 24 08:14:37 crc kubenswrapper[4691]: I1124 08:14:37.057787 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82e3978a-d699-4b7b-ac29-eee2e5d347f3","Type":"ContainerDied","Data":"67eb28b13cd17146ee41621e31b17128ce5fb15e3d22a2893fa8fc7cc3656296"} Nov 24 08:14:37 crc kubenswrapper[4691]: I1124 08:14:37.057825 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82e3978a-d699-4b7b-ac29-eee2e5d347f3","Type":"ContainerDied","Data":"884f0abee83fda280a286fe75c7b6a3a0a033b6ac9d1395088868097dfa2ae4f"} Nov 24 08:14:37 crc kubenswrapper[4691]: I1124 08:14:37.146647 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6bf54cf5bc-7wgwz"] Nov 24 08:14:37 crc kubenswrapper[4691]: W1124 08:14:37.153633 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a9213e2_4a1f_4d15_ab02_472c467babfe.slice/crio-29dac297c327257dfcd33362f52f747f9ad03a163a6b795f3a3ff31ed4946082 WatchSource:0}: Error finding container 29dac297c327257dfcd33362f52f747f9ad03a163a6b795f3a3ff31ed4946082: Status 404 returned error can't find the container with id 29dac297c327257dfcd33362f52f747f9ad03a163a6b795f3a3ff31ed4946082 Nov 24 08:14:37 crc kubenswrapper[4691]: I1124 08:14:37.677729 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:14:37 crc kubenswrapper[4691]: I1124 08:14:37.678657 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="08ef6b82-8aff-4b11-a0be-9e04670b96b7" containerName="glance-log" containerID="cri-o://f70b2f9247b81635da2926d0c6cc2fa525d9763c1173574682335fe1a03af738" gracePeriod=30 Nov 24 08:14:37 crc kubenswrapper[4691]: I1124 08:14:37.679386 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="08ef6b82-8aff-4b11-a0be-9e04670b96b7" containerName="glance-httpd" containerID="cri-o://2fcedd905e14a1d955cf31b9414c55e5729165fb33ec43be8b06caf8c3d5383c" gracePeriod=30 Nov 24 08:14:38 crc kubenswrapper[4691]: I1124 08:14:38.072790 4691 generic.go:334] "Generic (PLEG): container finished" podID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerID="757732dab03be290ae443b94ad2196894fd2099add81bbd1ad7277bd3a539224" exitCode=0 Nov 24 08:14:38 crc kubenswrapper[4691]: I1124 08:14:38.072872 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82e3978a-d699-4b7b-ac29-eee2e5d347f3","Type":"ContainerDied","Data":"757732dab03be290ae443b94ad2196894fd2099add81bbd1ad7277bd3a539224"} Nov 24 08:14:38 crc kubenswrapper[4691]: I1124 08:14:38.075345 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" event={"ID":"9a9213e2-4a1f-4d15-ab02-472c467babfe","Type":"ContainerStarted","Data":"1b8198eecddf0ce0d233b07aba09d351e3a095231f84a065d4fd75ee6151359d"} Nov 24 08:14:38 crc kubenswrapper[4691]: I1124 08:14:38.075396 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" 
event={"ID":"9a9213e2-4a1f-4d15-ab02-472c467babfe","Type":"ContainerStarted","Data":"12d3056deb6b42c69f398ec5107b4292bfebe7cf7e93a4431b28109e67466659"} Nov 24 08:14:38 crc kubenswrapper[4691]: I1124 08:14:38.075410 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" event={"ID":"9a9213e2-4a1f-4d15-ab02-472c467babfe","Type":"ContainerStarted","Data":"29dac297c327257dfcd33362f52f747f9ad03a163a6b795f3a3ff31ed4946082"} Nov 24 08:14:38 crc kubenswrapper[4691]: I1124 08:14:38.075492 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:38 crc kubenswrapper[4691]: I1124 08:14:38.079425 4691 generic.go:334] "Generic (PLEG): container finished" podID="08ef6b82-8aff-4b11-a0be-9e04670b96b7" containerID="f70b2f9247b81635da2926d0c6cc2fa525d9763c1173574682335fe1a03af738" exitCode=143 Nov 24 08:14:38 crc kubenswrapper[4691]: I1124 08:14:38.079480 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"08ef6b82-8aff-4b11-a0be-9e04670b96b7","Type":"ContainerDied","Data":"f70b2f9247b81635da2926d0c6cc2fa525d9763c1173574682335fe1a03af738"} Nov 24 08:14:38 crc kubenswrapper[4691]: I1124 08:14:38.821534 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" podStartSLOduration=3.821096129 podStartE2EDuration="3.821096129s" podCreationTimestamp="2025-11-24 08:14:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:38.107922431 +0000 UTC m=+1040.106871700" watchObservedRunningTime="2025-11-24 08:14:38.821096129 +0000 UTC m=+1040.820045378" Nov 24 08:14:39 crc kubenswrapper[4691]: I1124 08:14:39.111042 4691 generic.go:334] "Generic (PLEG): container finished" podID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerID="6eae479600c567b7a7c67e54a0c0f5726535f14c0321b2309b5c75726c7752ce" exitCode=0 Nov 24 08:14:39 crc kubenswrapper[4691]: I1124 08:14:39.111111 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82e3978a-d699-4b7b-ac29-eee2e5d347f3","Type":"ContainerDied","Data":"6eae479600c567b7a7c67e54a0c0f5726535f14c0321b2309b5c75726c7752ce"} Nov 24 08:14:39 crc kubenswrapper[4691]: I1124 08:14:39.111484 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:39 crc kubenswrapper[4691]: I1124 08:14:39.684135 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:14:39 crc kubenswrapper[4691]: I1124 08:14:39.684893 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8626dac3-0df1-42b8-8ea2-52239b7b73c3" containerName="glance-log" containerID="cri-o://4c790e2944796a4992eff9a8a3b45102a0210263bf2eb0726d29adc7c9799463" gracePeriod=30 Nov 24 08:14:39 crc kubenswrapper[4691]: I1124 08:14:39.685172 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="8626dac3-0df1-42b8-8ea2-52239b7b73c3" containerName="glance-httpd" containerID="cri-o://2dd5802e29187d4f6fe90a3efea1e88df7d7cbfb2877e458550a436462c05df1" gracePeriod=30 Nov 24 08:14:40 crc kubenswrapper[4691]: I1124 08:14:40.123652 4691 generic.go:334] "Generic (PLEG): container finished" 
podID="8626dac3-0df1-42b8-8ea2-52239b7b73c3" containerID="4c790e2944796a4992eff9a8a3b45102a0210263bf2eb0726d29adc7c9799463" exitCode=143 Nov 24 08:14:40 crc kubenswrapper[4691]: I1124 08:14:40.123737 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8626dac3-0df1-42b8-8ea2-52239b7b73c3","Type":"ContainerDied","Data":"4c790e2944796a4992eff9a8a3b45102a0210263bf2eb0726d29adc7c9799463"} Nov 24 08:14:40 crc kubenswrapper[4691]: I1124 08:14:40.667918 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 24 08:14:41 crc kubenswrapper[4691]: I1124 08:14:41.135593 4691 generic.go:334] "Generic (PLEG): container finished" podID="08ef6b82-8aff-4b11-a0be-9e04670b96b7" containerID="2fcedd905e14a1d955cf31b9414c55e5729165fb33ec43be8b06caf8c3d5383c" exitCode=0 Nov 24 08:14:41 crc kubenswrapper[4691]: I1124 08:14:41.135638 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"08ef6b82-8aff-4b11-a0be-9e04670b96b7","Type":"ContainerDied","Data":"2fcedd905e14a1d955cf31b9414c55e5729165fb33ec43be8b06caf8c3d5383c"} Nov 24 08:14:43 crc kubenswrapper[4691]: I1124 08:14:43.157024 4691 generic.go:334] "Generic (PLEG): container finished" podID="8626dac3-0df1-42b8-8ea2-52239b7b73c3" containerID="2dd5802e29187d4f6fe90a3efea1e88df7d7cbfb2877e458550a436462c05df1" exitCode=0 Nov 24 08:14:43 crc kubenswrapper[4691]: I1124 08:14:43.157335 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8626dac3-0df1-42b8-8ea2-52239b7b73c3","Type":"ContainerDied","Data":"2dd5802e29187d4f6fe90a3efea1e88df7d7cbfb2877e458550a436462c05df1"} Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.117789 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-77477f4d7b-kclfz" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.301494 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.404933 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-combined-ca-bundle\") pod \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.405895 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-config-data\") pod \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.406074 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-scripts\") pod \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.406197 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-log-httpd\") pod \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.406318 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlxdm\" (UniqueName: \"kubernetes.io/projected/82e3978a-d699-4b7b-ac29-eee2e5d347f3-kube-api-access-xlxdm\") pod \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.406371 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-sg-core-conf-yaml\") pod \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.406404 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-run-httpd\") pod \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\" (UID: \"82e3978a-d699-4b7b-ac29-eee2e5d347f3\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.407028 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "82e3978a-d699-4b7b-ac29-eee2e5d347f3" (UID: "82e3978a-d699-4b7b-ac29-eee2e5d347f3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.407478 4691 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.407770 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "82e3978a-d699-4b7b-ac29-eee2e5d347f3" (UID: "82e3978a-d699-4b7b-ac29-eee2e5d347f3"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.415606 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82e3978a-d699-4b7b-ac29-eee2e5d347f3-kube-api-access-xlxdm" (OuterVolumeSpecName: "kube-api-access-xlxdm") pod "82e3978a-d699-4b7b-ac29-eee2e5d347f3" (UID: "82e3978a-d699-4b7b-ac29-eee2e5d347f3"). InnerVolumeSpecName "kube-api-access-xlxdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.416705 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-scripts" (OuterVolumeSpecName: "scripts") pod "82e3978a-d699-4b7b-ac29-eee2e5d347f3" (UID: "82e3978a-d699-4b7b-ac29-eee2e5d347f3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.433470 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "82e3978a-d699-4b7b-ac29-eee2e5d347f3" (UID: "82e3978a-d699-4b7b-ac29-eee2e5d347f3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.489907 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.494337 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "82e3978a-d699-4b7b-ac29-eee2e5d347f3" (UID: "82e3978a-d699-4b7b-ac29-eee2e5d347f3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.510267 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnv92\" (UniqueName: \"kubernetes.io/projected/08ef6b82-8aff-4b11-a0be-9e04670b96b7-kube-api-access-dnv92\") pod \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.510337 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-public-tls-certs\") pod \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.510389 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-config-data\") pod \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.510483 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-httpd-run\") pod \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.510538 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-scripts\") pod \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.510605 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-logs\") pod \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.510771 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.510829 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-combined-ca-bundle\") pod \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\" (UID: \"08ef6b82-8aff-4b11-a0be-9e04670b96b7\") " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.511573 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "08ef6b82-8aff-4b11-a0be-9e04670b96b7" (UID: "08ef6b82-8aff-4b11-a0be-9e04670b96b7"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.512997 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-logs" (OuterVolumeSpecName: "logs") pod "08ef6b82-8aff-4b11-a0be-9e04670b96b7" (UID: "08ef6b82-8aff-4b11-a0be-9e04670b96b7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.513474 4691 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.513495 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08ef6b82-8aff-4b11-a0be-9e04670b96b7-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.513505 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlxdm\" (UniqueName: \"kubernetes.io/projected/82e3978a-d699-4b7b-ac29-eee2e5d347f3-kube-api-access-xlxdm\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.513518 4691 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.513528 4691 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/82e3978a-d699-4b7b-ac29-eee2e5d347f3-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.513538 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.513547 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.517715 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08ef6b82-8aff-4b11-a0be-9e04670b96b7-kube-api-access-dnv92" (OuterVolumeSpecName: "kube-api-access-dnv92") pod "08ef6b82-8aff-4b11-a0be-9e04670b96b7" (UID: "08ef6b82-8aff-4b11-a0be-9e04670b96b7"). InnerVolumeSpecName "kube-api-access-dnv92". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.521200 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-scripts" (OuterVolumeSpecName: "scripts") pod "08ef6b82-8aff-4b11-a0be-9e04670b96b7" (UID: "08ef6b82-8aff-4b11-a0be-9e04670b96b7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.532795 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "08ef6b82-8aff-4b11-a0be-9e04670b96b7" (UID: "08ef6b82-8aff-4b11-a0be-9e04670b96b7"). InnerVolumeSpecName "local-storage12-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.586268 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08ef6b82-8aff-4b11-a0be-9e04670b96b7" (UID: "08ef6b82-8aff-4b11-a0be-9e04670b96b7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.597426 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-config-data" (OuterVolumeSpecName: "config-data") pod "82e3978a-d699-4b7b-ac29-eee2e5d347f3" (UID: "82e3978a-d699-4b7b-ac29-eee2e5d347f3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.601603 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-config-data" (OuterVolumeSpecName: "config-data") pod "08ef6b82-8aff-4b11-a0be-9e04670b96b7" (UID: "08ef6b82-8aff-4b11-a0be-9e04670b96b7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.609298 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "08ef6b82-8aff-4b11-a0be-9e04670b96b7" (UID: "08ef6b82-8aff-4b11-a0be-9e04670b96b7"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.615973 4691 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.616019 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.616036 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82e3978a-d699-4b7b-ac29-eee2e5d347f3-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.616049 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnv92\" (UniqueName: \"kubernetes.io/projected/08ef6b82-8aff-4b11-a0be-9e04670b96b7-kube-api-access-dnv92\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.616063 4691 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.616075 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.616086 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/08ef6b82-8aff-4b11-a0be-9e04670b96b7-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.636229 4691 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 24 08:14:44 crc kubenswrapper[4691]: I1124 08:14:44.718596 4691 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.216281 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"08ef6b82-8aff-4b11-a0be-9e04670b96b7","Type":"ContainerDied","Data":"71a233985b2bf39462857dfe54a9ceaaa36bdfe3a035db9fd44667362b53b3c0"} Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.216322 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.216327 4691 scope.go:117] "RemoveContainer" containerID="2fcedd905e14a1d955cf31b9414c55e5729165fb33ec43be8b06caf8c3d5383c" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.224737 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"82e3978a-d699-4b7b-ac29-eee2e5d347f3","Type":"ContainerDied","Data":"2989420e91532119a9144e01218b6d19033ee8292ab1d6d30589f6f3fe0edd7e"} Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.224822 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.230308 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"86907013-52ae-4aeb-a697-6066cfdbebde","Type":"ContainerStarted","Data":"80875e569afe8799dcce36f5ff1a7bbf49d1feddc5695c17ddbda46c8153d496"} Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.254370 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.279814 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.591902592 podStartE2EDuration="14.279792229s" podCreationTimestamp="2025-11-24 08:14:31 +0000 UTC" firstStartedPulling="2025-11-24 08:14:32.447408703 +0000 UTC m=+1034.446357952" lastFinishedPulling="2025-11-24 08:14:44.13529834 +0000 UTC m=+1046.134247589" observedRunningTime="2025-11-24 08:14:45.265170645 +0000 UTC m=+1047.264119894" watchObservedRunningTime="2025-11-24 08:14:45.279792229 +0000 UTC m=+1047.278741478" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.281728 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.288140 4691 scope.go:117] "RemoveContainer" containerID="f70b2f9247b81635da2926d0c6cc2fa525d9763c1173574682335fe1a03af738" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.310779 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:14:45 crc kubenswrapper[4691]: E1124 08:14:45.311440 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="ceilometer-central-agent" Nov 24 08:14:45 crc 
kubenswrapper[4691]: I1124 08:14:45.311469 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="ceilometer-central-agent" Nov 24 08:14:45 crc kubenswrapper[4691]: E1124 08:14:45.311487 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="proxy-httpd" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.311493 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="proxy-httpd" Nov 24 08:14:45 crc kubenswrapper[4691]: E1124 08:14:45.311512 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08ef6b82-8aff-4b11-a0be-9e04670b96b7" containerName="glance-log" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.311518 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="08ef6b82-8aff-4b11-a0be-9e04670b96b7" containerName="glance-log" Nov 24 08:14:45 crc kubenswrapper[4691]: E1124 08:14:45.311537 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="sg-core" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.311542 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="sg-core" Nov 24 08:14:45 crc kubenswrapper[4691]: E1124 08:14:45.311550 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08ef6b82-8aff-4b11-a0be-9e04670b96b7" containerName="glance-httpd" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.311557 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="08ef6b82-8aff-4b11-a0be-9e04670b96b7" containerName="glance-httpd" Nov 24 08:14:45 crc kubenswrapper[4691]: E1124 08:14:45.311571 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="ceilometer-notification-agent" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.311577 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="ceilometer-notification-agent" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.311744 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="ceilometer-central-agent" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.311759 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="08ef6b82-8aff-4b11-a0be-9e04670b96b7" containerName="glance-httpd" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.311771 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="sg-core" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.311788 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="ceilometer-notification-agent" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.311799 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" containerName="proxy-httpd" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.311809 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="08ef6b82-8aff-4b11-a0be-9e04670b96b7" containerName="glance-log" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.312892 4691 util.go:30] "No sandbox for pod can be found. 
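
Annotation: a few entries above, the startup-latency tracker reports openstackclient with podStartE2EDuration=14.279792229s but podStartSLOduration=2.591902592s. The SLO figure excludes image-pull time: E2E is watchObservedRunningTime minus podCreationTimestamp, and the SLO duration additionally subtracts lastFinishedPulling minus firstStartedPulling (for cinder-scheduler-0 earlier, both pull stamps were the zero time, so the two durations were equal). The arithmetic checks out against the logged timestamps:

    package main

    import (
        "fmt"
        "time"
    )

    // Reproduce the two durations in the "Observed pod startup duration"
    // entry for openstack/openstackclient: E2E = observed - created, and
    // SLO = E2E minus the time spent pulling images.
    func main() {
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        parse := func(s string) time.Time {
            t, err := time.Parse(layout, s)
            if err != nil {
                panic(err)
            }
            return t
        }
        created := parse("2025-11-24 08:14:31 +0000 UTC")
        firstPull := parse("2025-11-24 08:14:32.447408703 +0000 UTC")
        lastPull := parse("2025-11-24 08:14:44.13529834 +0000 UTC")
        observed := parse("2025-11-24 08:14:45.279792229 +0000 UTC")

        e2e := observed.Sub(created)
        slo := e2e - lastPull.Sub(firstPull)
        fmt.Println("podStartE2EDuration =", e2e) // 14.279792229s
        fmt.Println("podStartSLOduration =", slo) // 2.591902592s
    }
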
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.315093 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.315335 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.327579 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.344776 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.351297 4691 scope.go:117] "RemoveContainer" containerID="67eb28b13cd17146ee41621e31b17128ce5fb15e3d22a2893fa8fc7cc3656296" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.352181 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.364926 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.367694 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.372139 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.375089 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.396746 4691 scope.go:117] "RemoveContainer" containerID="884f0abee83fda280a286fe75c7b6a3a0a033b6ac9d1395088868097dfa2ae4f" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.402614 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431541 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-logs\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431594 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-config-data\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431619 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431635 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " 
pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431659 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-scripts\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431683 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-run-httpd\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431708 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431739 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431759 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-log-httpd\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431775 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-config-data\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431797 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431819 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-scripts\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431843 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vndg7\" (UniqueName: \"kubernetes.io/projected/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-kube-api-access-vndg7\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc 
kubenswrapper[4691]: I1124 08:14:45.431877 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nfdl\" (UniqueName: \"kubernetes.io/projected/366a5d80-56a5-4847-9bb6-2e588797c1c7-kube-api-access-8nfdl\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.431920 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.434373 4691 scope.go:117] "RemoveContainer" containerID="6eae479600c567b7a7c67e54a0c0f5726535f14c0321b2309b5c75726c7752ce" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.467225 4691 scope.go:117] "RemoveContainer" containerID="757732dab03be290ae443b94ad2196894fd2099add81bbd1ad7277bd3a539224" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.534836 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.534900 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-log-httpd\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.534935 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-config-data\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.534967 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.534999 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-scripts\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.535033 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vndg7\" (UniqueName: \"kubernetes.io/projected/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-kube-api-access-vndg7\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.535074 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nfdl\" (UniqueName: 
\"kubernetes.io/projected/366a5d80-56a5-4847-9bb6-2e588797c1c7-kube-api-access-8nfdl\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.535122 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.535165 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-logs\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.535190 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-config-data\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.535211 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.535231 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.535258 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-scripts\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.535285 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-run-httpd\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.535316 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.539285 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-log-httpd\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 
08:14:45.540121 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.542739 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-logs\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.543692 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-run-httpd\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.543935 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.543974 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.547734 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-config-data\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.553867 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-scripts\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.554102 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.554206 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-scripts\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.555345 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-config-data\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " 
pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.558544 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.560943 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.566575 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nfdl\" (UniqueName: \"kubernetes.io/projected/366a5d80-56a5-4847-9bb6-2e588797c1c7-kube-api-access-8nfdl\") pod \"ceilometer-0\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.576372 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vndg7\" (UniqueName: \"kubernetes.io/projected/02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3-kube-api-access-vndg7\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.593918 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3\") " pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.652059 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.698655 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.705371 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.753735 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-internal-tls-certs\") pod \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.753868 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-combined-ca-bundle\") pod \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.753925 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-config-data\") pod \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.753965 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-httpd-run\") pod \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.754018 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.754125 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-logs\") pod \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.754230 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmglg\" (UniqueName: \"kubernetes.io/projected/8626dac3-0df1-42b8-8ea2-52239b7b73c3-kube-api-access-nmglg\") pod \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.754262 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-scripts\") pod \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\" (UID: \"8626dac3-0df1-42b8-8ea2-52239b7b73c3\") " Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.759130 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8626dac3-0df1-42b8-8ea2-52239b7b73c3" (UID: "8626dac3-0df1-42b8-8ea2-52239b7b73c3"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.759790 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-logs" (OuterVolumeSpecName: "logs") pod "8626dac3-0df1-42b8-8ea2-52239b7b73c3" (UID: "8626dac3-0df1-42b8-8ea2-52239b7b73c3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.777045 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-scripts" (OuterVolumeSpecName: "scripts") pod "8626dac3-0df1-42b8-8ea2-52239b7b73c3" (UID: "8626dac3-0df1-42b8-8ea2-52239b7b73c3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.777189 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "8626dac3-0df1-42b8-8ea2-52239b7b73c3" (UID: "8626dac3-0df1-42b8-8ea2-52239b7b73c3"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.797771 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8626dac3-0df1-42b8-8ea2-52239b7b73c3-kube-api-access-nmglg" (OuterVolumeSpecName: "kube-api-access-nmglg") pod "8626dac3-0df1-42b8-8ea2-52239b7b73c3" (UID: "8626dac3-0df1-42b8-8ea2-52239b7b73c3"). InnerVolumeSpecName "kube-api-access-nmglg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.819331 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8626dac3-0df1-42b8-8ea2-52239b7b73c3" (UID: "8626dac3-0df1-42b8-8ea2-52239b7b73c3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.860719 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmglg\" (UniqueName: \"kubernetes.io/projected/8626dac3-0df1-42b8-8ea2-52239b7b73c3-kube-api-access-nmglg\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.860755 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.860765 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.860777 4691 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.860800 4691 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.860809 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8626dac3-0df1-42b8-8ea2-52239b7b73c3-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.867538 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-config-data" (OuterVolumeSpecName: "config-data") pod "8626dac3-0df1-42b8-8ea2-52239b7b73c3" (UID: "8626dac3-0df1-42b8-8ea2-52239b7b73c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.892912 4691 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.965437 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.965520 4691 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:45 crc kubenswrapper[4691]: I1124 08:14:45.966830 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8626dac3-0df1-42b8-8ea2-52239b7b73c3" (UID: "8626dac3-0df1-42b8-8ea2-52239b7b73c3"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.066953 4691 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8626dac3-0df1-42b8-8ea2-52239b7b73c3-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.242132 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8626dac3-0df1-42b8-8ea2-52239b7b73c3","Type":"ContainerDied","Data":"9d8c93534e2f44793a40fac94db6a83609eae2776d35c5e359fa90d5651de361"} Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.242186 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.242192 4691 scope.go:117] "RemoveContainer" containerID="2dd5802e29187d4f6fe90a3efea1e88df7d7cbfb2877e458550a436462c05df1" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.274605 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.277382 4691 scope.go:117] "RemoveContainer" containerID="4c790e2944796a4992eff9a8a3b45102a0210263bf2eb0726d29adc7c9799463" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.282190 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.312662 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:14:46 crc kubenswrapper[4691]: E1124 08:14:46.313097 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8626dac3-0df1-42b8-8ea2-52239b7b73c3" containerName="glance-log" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.313111 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8626dac3-0df1-42b8-8ea2-52239b7b73c3" containerName="glance-log" Nov 24 08:14:46 crc kubenswrapper[4691]: E1124 08:14:46.313122 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8626dac3-0df1-42b8-8ea2-52239b7b73c3" containerName="glance-httpd" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.313129 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8626dac3-0df1-42b8-8ea2-52239b7b73c3" containerName="glance-httpd" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.313334 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="8626dac3-0df1-42b8-8ea2-52239b7b73c3" containerName="glance-log" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.313351 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="8626dac3-0df1-42b8-8ea2-52239b7b73c3" containerName="glance-httpd" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.314614 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.319174 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.320139 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.326616 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.374047 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31f53279-5e1f-44f9-a1f5-338600bc0156-logs\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.374151 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcb59\" (UniqueName: \"kubernetes.io/projected/31f53279-5e1f-44f9-a1f5-338600bc0156-kube-api-access-gcb59\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.374177 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-scripts\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.374214 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.374229 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.374248 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/31f53279-5e1f-44f9-a1f5-338600bc0156-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.374264 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-config-data\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.374310 4691 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.384022 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.385944 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.426810 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.475887 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/31f53279-5e1f-44f9-a1f5-338600bc0156-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.475966 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-config-data\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.476025 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.476090 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31f53279-5e1f-44f9-a1f5-338600bc0156-logs\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.476181 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcb59\" (UniqueName: \"kubernetes.io/projected/31f53279-5e1f-44f9-a1f5-338600bc0156-kube-api-access-gcb59\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.476201 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-scripts\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.476246 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.476260 4691 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.477305 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31f53279-5e1f-44f9-a1f5-338600bc0156-logs\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.477661 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/31f53279-5e1f-44f9-a1f5-338600bc0156-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.483041 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.484396 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.492208 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.505340 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-config-data\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.523847 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31f53279-5e1f-44f9-a1f5-338600bc0156-scripts\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.525971 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.525983 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcb59\" (UniqueName: \"kubernetes.io/projected/31f53279-5e1f-44f9-a1f5-338600bc0156-kube-api-access-gcb59\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 
08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.562145 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"31f53279-5e1f-44f9-a1f5-338600bc0156\") " pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.632473 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.777014 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08ef6b82-8aff-4b11-a0be-9e04670b96b7" path="/var/lib/kubelet/pods/08ef6b82-8aff-4b11-a0be-9e04670b96b7/volumes" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.778219 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82e3978a-d699-4b7b-ac29-eee2e5d347f3" path="/var/lib/kubelet/pods/82e3978a-d699-4b7b-ac29-eee2e5d347f3/volumes" Nov 24 08:14:46 crc kubenswrapper[4691]: I1124 08:14:46.779637 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8626dac3-0df1-42b8-8ea2-52239b7b73c3" path="/var/lib/kubelet/pods/8626dac3-0df1-42b8-8ea2-52239b7b73c3/volumes" Nov 24 08:14:47 crc kubenswrapper[4691]: I1124 08:14:47.065481 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 24 08:14:47 crc kubenswrapper[4691]: W1124 08:14:47.078877 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31f53279_5e1f_44f9_a1f5_338600bc0156.slice/crio-21a3a6397fbdc5c5483326dfc80fc15d727f0e6cd429bed01641f28798973bcf WatchSource:0}: Error finding container 21a3a6397fbdc5c5483326dfc80fc15d727f0e6cd429bed01641f28798973bcf: Status 404 returned error can't find the container with id 21a3a6397fbdc5c5483326dfc80fc15d727f0e6cd429bed01641f28798973bcf Nov 24 08:14:47 crc kubenswrapper[4691]: I1124 08:14:47.320231 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"366a5d80-56a5-4847-9bb6-2e588797c1c7","Type":"ContainerStarted","Data":"499bcbe2fc20cfd01608130e2afccb058228206971422ae1ae51eab8a5a4a4d1"} Nov 24 08:14:47 crc kubenswrapper[4691]: I1124 08:14:47.324003 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3","Type":"ContainerStarted","Data":"ce84e827108723c700d4a3b1300a8316a02140c32b42843e10f75439da721533"} Nov 24 08:14:47 crc kubenswrapper[4691]: I1124 08:14:47.328771 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"31f53279-5e1f-44f9-a1f5-338600bc0156","Type":"ContainerStarted","Data":"21a3a6397fbdc5c5483326dfc80fc15d727f0e6cd429bed01641f28798973bcf"} Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.019175 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.345958 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"31f53279-5e1f-44f9-a1f5-338600bc0156","Type":"ContainerStarted","Data":"bf9ca4b6ad9c04a335740a2f579d7b01b1ba3b365a23388c76751a1102f2eaf5"} Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.347652 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.351199 4691 generic.go:334] "Generic (PLEG): container finished" podID="a8f1c7da-eb94-4115-baf0-f15d335d85e0" containerID="4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997" exitCode=137 Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.351263 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a8f1c7da-eb94-4115-baf0-f15d335d85e0","Type":"ContainerDied","Data":"4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997"} Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.351294 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a8f1c7da-eb94-4115-baf0-f15d335d85e0","Type":"ContainerDied","Data":"90e758c5c136a75ae1f382596cc92dde64a60c39fa6c1e43833aecb926884746"} Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.351311 4691 scope.go:117] "RemoveContainer" containerID="4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.406909 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"366a5d80-56a5-4847-9bb6-2e588797c1c7","Type":"ContainerStarted","Data":"f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c"} Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.406961 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"366a5d80-56a5-4847-9bb6-2e588797c1c7","Type":"ContainerStarted","Data":"55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005"} Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.413801 4691 scope.go:117] "RemoveContainer" containerID="93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.434798 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3","Type":"ContainerStarted","Data":"e812fa4f619a055725ee87b5e9534edf13ddfd50d218d20d1a64d0d74d1c4ce8"} Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.434839 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3","Type":"ContainerStarted","Data":"f83fd62926b8a820b10e02e5317883a7dc2e050385efbd20ff87911bb7f5bce0"} Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.479049 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.479023016 podStartE2EDuration="3.479023016s" podCreationTimestamp="2025-11-24 08:14:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:48.472796485 +0000 UTC m=+1050.471745734" watchObservedRunningTime="2025-11-24 08:14:48.479023016 +0000 UTC m=+1050.477972255" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.490114 4691 scope.go:117] "RemoveContainer" containerID="4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997" Nov 24 08:14:48 crc kubenswrapper[4691]: E1124 08:14:48.490752 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997\": container with ID starting with 
4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997 not found: ID does not exist" containerID="4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.490807 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997"} err="failed to get container status \"4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997\": rpc error: code = NotFound desc = could not find container \"4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997\": container with ID starting with 4ced9f57be4446e67e4e177c95b7f8e4001f0083b4cc24d01bf530e640fb1997 not found: ID does not exist" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.490846 4691 scope.go:117] "RemoveContainer" containerID="93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09" Nov 24 08:14:48 crc kubenswrapper[4691]: E1124 08:14:48.491252 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09\": container with ID starting with 93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09 not found: ID does not exist" containerID="93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.491325 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09"} err="failed to get container status \"93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09\": rpc error: code = NotFound desc = could not find container \"93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09\": container with ID starting with 93b489312118032a814911f7cb2dae24b326939a8ba73c3e954d17570b879b09 not found: ID does not exist" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.545124 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8f1c7da-eb94-4115-baf0-f15d335d85e0-logs" (OuterVolumeSpecName: "logs") pod "a8f1c7da-eb94-4115-baf0-f15d335d85e0" (UID: "a8f1c7da-eb94-4115-baf0-f15d335d85e0"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.545410 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f1c7da-eb94-4115-baf0-f15d335d85e0-logs\") pod \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.545713 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data-custom\") pod \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.546969 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data\") pod \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.547111 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-combined-ca-bundle\") pod \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.548895 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-scripts\") pod \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.549221 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a8f1c7da-eb94-4115-baf0-f15d335d85e0-etc-machine-id\") pod \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.549612 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a8f1c7da-eb94-4115-baf0-f15d335d85e0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a8f1c7da-eb94-4115-baf0-f15d335d85e0" (UID: "a8f1c7da-eb94-4115-baf0-f15d335d85e0"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.549612 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4n9zl\" (UniqueName: \"kubernetes.io/projected/a8f1c7da-eb94-4115-baf0-f15d335d85e0-kube-api-access-4n9zl\") pod \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\" (UID: \"a8f1c7da-eb94-4115-baf0-f15d335d85e0\") " Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.553409 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f1c7da-eb94-4115-baf0-f15d335d85e0-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.553602 4691 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a8f1c7da-eb94-4115-baf0-f15d335d85e0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.555767 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-scripts" (OuterVolumeSpecName: "scripts") pod "a8f1c7da-eb94-4115-baf0-f15d335d85e0" (UID: "a8f1c7da-eb94-4115-baf0-f15d335d85e0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.556042 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a8f1c7da-eb94-4115-baf0-f15d335d85e0" (UID: "a8f1c7da-eb94-4115-baf0-f15d335d85e0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.568067 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8f1c7da-eb94-4115-baf0-f15d335d85e0-kube-api-access-4n9zl" (OuterVolumeSpecName: "kube-api-access-4n9zl") pod "a8f1c7da-eb94-4115-baf0-f15d335d85e0" (UID: "a8f1c7da-eb94-4115-baf0-f15d335d85e0"). InnerVolumeSpecName "kube-api-access-4n9zl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.613570 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8f1c7da-eb94-4115-baf0-f15d335d85e0" (UID: "a8f1c7da-eb94-4115-baf0-f15d335d85e0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.656185 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4n9zl\" (UniqueName: \"kubernetes.io/projected/a8f1c7da-eb94-4115-baf0-f15d335d85e0-kube-api-access-4n9zl\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.656560 4691 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.656672 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.656791 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.688805 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data" (OuterVolumeSpecName: "config-data") pod "a8f1c7da-eb94-4115-baf0-f15d335d85e0" (UID: "a8f1c7da-eb94-4115-baf0-f15d335d85e0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:48 crc kubenswrapper[4691]: I1124 08:14:48.760546 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f1c7da-eb94-4115-baf0-f15d335d85e0-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.449731 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"366a5d80-56a5-4847-9bb6-2e588797c1c7","Type":"ContainerStarted","Data":"ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb"} Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.452423 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"31f53279-5e1f-44f9-a1f5-338600bc0156","Type":"ContainerStarted","Data":"07e6be033b23849ccb1fed016564b733567525cfda2353135612f32480f2ee62"} Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.454355 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.506198 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.506174893 podStartE2EDuration="3.506174893s" podCreationTimestamp="2025-11-24 08:14:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:49.480973593 +0000 UTC m=+1051.479922842" watchObservedRunningTime="2025-11-24 08:14:49.506174893 +0000 UTC m=+1051.505124152" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.514237 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.525355 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.538819 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 24 08:14:49 crc kubenswrapper[4691]: E1124 08:14:49.539423 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8f1c7da-eb94-4115-baf0-f15d335d85e0" containerName="cinder-api-log" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.539471 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8f1c7da-eb94-4115-baf0-f15d335d85e0" containerName="cinder-api-log" Nov 24 08:14:49 crc kubenswrapper[4691]: E1124 08:14:49.539506 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8f1c7da-eb94-4115-baf0-f15d335d85e0" containerName="cinder-api" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.539514 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8f1c7da-eb94-4115-baf0-f15d335d85e0" containerName="cinder-api" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.539783 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8f1c7da-eb94-4115-baf0-f15d335d85e0" containerName="cinder-api" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.539819 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8f1c7da-eb94-4115-baf0-f15d335d85e0" containerName="cinder-api-log" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.541146 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.544463 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.544625 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.544682 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.556921 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.682613 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.682896 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.683032 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c47kd\" (UniqueName: \"kubernetes.io/projected/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-kube-api-access-c47kd\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.683163 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-config-data-custom\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.683408 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-scripts\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.683549 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-public-tls-certs\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.683648 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.683736 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-config-data\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.683916 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-logs\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.786367 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c47kd\" (UniqueName: \"kubernetes.io/projected/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-kube-api-access-c47kd\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.786418 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-config-data-custom\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.786524 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-scripts\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.786551 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-public-tls-certs\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.786580 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.786594 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-config-data\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.786621 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-logs\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.786655 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.786681 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.787738 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-logs\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.787810 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.792675 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.792764 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-config-data-custom\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.792887 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-scripts\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.793615 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-config-data\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.794871 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-public-tls-certs\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.797213 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.815161 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c47kd\" (UniqueName: \"kubernetes.io/projected/e9b3d587-fa7c-4af9-8667-d4ea91483ad9-kube-api-access-c47kd\") pod \"cinder-api-0\" (UID: \"e9b3d587-fa7c-4af9-8667-d4ea91483ad9\") " pod="openstack/cinder-api-0" Nov 24 08:14:49 crc kubenswrapper[4691]: I1124 08:14:49.861874 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 24 08:14:50 crc kubenswrapper[4691]: I1124 08:14:50.155442 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 24 08:14:50 crc kubenswrapper[4691]: I1124 08:14:50.483598 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"366a5d80-56a5-4847-9bb6-2e588797c1c7","Type":"ContainerStarted","Data":"6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e"} Nov 24 08:14:50 crc kubenswrapper[4691]: I1124 08:14:50.483849 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="ceilometer-central-agent" containerID="cri-o://55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005" gracePeriod=30 Nov 24 08:14:50 crc kubenswrapper[4691]: I1124 08:14:50.484168 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 08:14:50 crc kubenswrapper[4691]: I1124 08:14:50.484556 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="proxy-httpd" containerID="cri-o://6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e" gracePeriod=30 Nov 24 08:14:50 crc kubenswrapper[4691]: I1124 08:14:50.484652 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="sg-core" containerID="cri-o://ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb" gracePeriod=30 Nov 24 08:14:50 crc kubenswrapper[4691]: I1124 08:14:50.484853 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="ceilometer-notification-agent" containerID="cri-o://f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c" gracePeriod=30 Nov 24 08:14:50 crc kubenswrapper[4691]: I1124 08:14:50.498843 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e9b3d587-fa7c-4af9-8667-d4ea91483ad9","Type":"ContainerStarted","Data":"a2ee1be1cf84c6722309ae1404b724b2c4a764bbe82ef5b16781068c8de81753"} Nov 24 08:14:50 crc kubenswrapper[4691]: I1124 08:14:50.514380 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.900260321 podStartE2EDuration="5.514345401s" podCreationTimestamp="2025-11-24 08:14:45 +0000 UTC" firstStartedPulling="2025-11-24 08:14:46.440646991 +0000 UTC m=+1048.439596240" lastFinishedPulling="2025-11-24 08:14:50.054732071 +0000 UTC m=+1052.053681320" observedRunningTime="2025-11-24 08:14:50.50567903 +0000 UTC m=+1052.504628289" watchObservedRunningTime="2025-11-24 08:14:50.514345401 +0000 UTC m=+1052.513294650" Nov 24 08:14:50 crc kubenswrapper[4691]: I1124 08:14:50.777095 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8f1c7da-eb94-4115-baf0-f15d335d85e0" path="/var/lib/kubelet/pods/a8f1c7da-eb94-4115-baf0-f15d335d85e0/volumes" Nov 24 08:14:51 crc kubenswrapper[4691]: I1124 08:14:51.515342 4691 generic.go:334] "Generic (PLEG): container finished" podID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerID="ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb" exitCode=2 Nov 24 08:14:51 crc kubenswrapper[4691]: I1124 08:14:51.515827 4691 
generic.go:334] "Generic (PLEG): container finished" podID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerID="f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c" exitCode=0 Nov 24 08:14:51 crc kubenswrapper[4691]: I1124 08:14:51.515426 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"366a5d80-56a5-4847-9bb6-2e588797c1c7","Type":"ContainerDied","Data":"ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb"} Nov 24 08:14:51 crc kubenswrapper[4691]: I1124 08:14:51.515919 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"366a5d80-56a5-4847-9bb6-2e588797c1c7","Type":"ContainerDied","Data":"f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c"} Nov 24 08:14:51 crc kubenswrapper[4691]: I1124 08:14:51.519695 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e9b3d587-fa7c-4af9-8667-d4ea91483ad9","Type":"ContainerStarted","Data":"d16c9b733e4e640d28a41e013f202cc718a693bf6faf26ac9fff4446542b032b"} Nov 24 08:14:52 crc kubenswrapper[4691]: I1124 08:14:52.530900 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e9b3d587-fa7c-4af9-8667-d4ea91483ad9","Type":"ContainerStarted","Data":"fcb70d9b31e411fd8d6343c8a3543a215b080e3fc1cb7eba4cf57f7797d4ace1"} Nov 24 08:14:52 crc kubenswrapper[4691]: I1124 08:14:52.531289 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 24 08:14:52 crc kubenswrapper[4691]: I1124 08:14:52.554149 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.554126916 podStartE2EDuration="3.554126916s" podCreationTimestamp="2025-11-24 08:14:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:52.550438409 +0000 UTC m=+1054.549387668" watchObservedRunningTime="2025-11-24 08:14:52.554126916 +0000 UTC m=+1054.553076165" Nov 24 08:14:54 crc kubenswrapper[4691]: I1124 08:14:54.097878 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-77477f4d7b-kclfz" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Nov 24 08:14:54 crc kubenswrapper[4691]: I1124 08:14:54.098559 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.652782 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.653316 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.718246 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.726651 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.728035 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-hs4vf"] Nov 
24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.730274 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hs4vf" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.758979 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-hs4vf"] Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.819542 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tftnv\" (UniqueName: \"kubernetes.io/projected/4782846a-9f12-486c-b56f-137ef67dc92c-kube-api-access-tftnv\") pod \"nova-api-db-create-hs4vf\" (UID: \"4782846a-9f12-486c-b56f-137ef67dc92c\") " pod="openstack/nova-api-db-create-hs4vf" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.819949 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4782846a-9f12-486c-b56f-137ef67dc92c-operator-scripts\") pod \"nova-api-db-create-hs4vf\" (UID: \"4782846a-9f12-486c-b56f-137ef67dc92c\") " pod="openstack/nova-api-db-create-hs4vf" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.842470 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-wqkdf"] Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.844209 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-wqkdf" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.862997 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-wqkdf"] Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.882696 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-8a87-account-create-kq8md"] Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.884362 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-8a87-account-create-kq8md" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.890632 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.918162 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-8a87-account-create-kq8md"] Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.921407 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2x59h\" (UniqueName: \"kubernetes.io/projected/db6ac827-d728-40ca-bc93-b8f406242a9d-kube-api-access-2x59h\") pod \"nova-cell0-db-create-wqkdf\" (UID: \"db6ac827-d728-40ca-bc93-b8f406242a9d\") " pod="openstack/nova-cell0-db-create-wqkdf" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.921546 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4782846a-9f12-486c-b56f-137ef67dc92c-operator-scripts\") pod \"nova-api-db-create-hs4vf\" (UID: \"4782846a-9f12-486c-b56f-137ef67dc92c\") " pod="openstack/nova-api-db-create-hs4vf" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.921737 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gm8vx\" (UniqueName: \"kubernetes.io/projected/3a07bbfa-86c4-40b0-aebd-684592d41663-kube-api-access-gm8vx\") pod \"nova-api-8a87-account-create-kq8md\" (UID: \"3a07bbfa-86c4-40b0-aebd-684592d41663\") " pod="openstack/nova-api-8a87-account-create-kq8md" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.921850 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tftnv\" (UniqueName: \"kubernetes.io/projected/4782846a-9f12-486c-b56f-137ef67dc92c-kube-api-access-tftnv\") pod \"nova-api-db-create-hs4vf\" (UID: \"4782846a-9f12-486c-b56f-137ef67dc92c\") " pod="openstack/nova-api-db-create-hs4vf" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.921891 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db6ac827-d728-40ca-bc93-b8f406242a9d-operator-scripts\") pod \"nova-cell0-db-create-wqkdf\" (UID: \"db6ac827-d728-40ca-bc93-b8f406242a9d\") " pod="openstack/nova-cell0-db-create-wqkdf" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.921955 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a07bbfa-86c4-40b0-aebd-684592d41663-operator-scripts\") pod \"nova-api-8a87-account-create-kq8md\" (UID: \"3a07bbfa-86c4-40b0-aebd-684592d41663\") " pod="openstack/nova-api-8a87-account-create-kq8md" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.922379 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4782846a-9f12-486c-b56f-137ef67dc92c-operator-scripts\") pod \"nova-api-db-create-hs4vf\" (UID: \"4782846a-9f12-486c-b56f-137ef67dc92c\") " pod="openstack/nova-api-db-create-hs4vf" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.950760 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tftnv\" (UniqueName: \"kubernetes.io/projected/4782846a-9f12-486c-b56f-137ef67dc92c-kube-api-access-tftnv\") pod \"nova-api-db-create-hs4vf\" (UID: 
\"4782846a-9f12-486c-b56f-137ef67dc92c\") " pod="openstack/nova-api-db-create-hs4vf" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.955508 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-8wsc6"] Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.956987 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8wsc6" Nov 24 08:14:55 crc kubenswrapper[4691]: I1124 08:14:55.961021 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-8wsc6"] Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.023217 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2x59h\" (UniqueName: \"kubernetes.io/projected/db6ac827-d728-40ca-bc93-b8f406242a9d-kube-api-access-2x59h\") pod \"nova-cell0-db-create-wqkdf\" (UID: \"db6ac827-d728-40ca-bc93-b8f406242a9d\") " pod="openstack/nova-cell0-db-create-wqkdf" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.023716 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krp47\" (UniqueName: \"kubernetes.io/projected/cc91f7cc-3f75-46d8-b521-30dc169ab022-kube-api-access-krp47\") pod \"nova-cell1-db-create-8wsc6\" (UID: \"cc91f7cc-3f75-46d8-b521-30dc169ab022\") " pod="openstack/nova-cell1-db-create-8wsc6" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.023841 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc91f7cc-3f75-46d8-b521-30dc169ab022-operator-scripts\") pod \"nova-cell1-db-create-8wsc6\" (UID: \"cc91f7cc-3f75-46d8-b521-30dc169ab022\") " pod="openstack/nova-cell1-db-create-8wsc6" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.023881 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gm8vx\" (UniqueName: \"kubernetes.io/projected/3a07bbfa-86c4-40b0-aebd-684592d41663-kube-api-access-gm8vx\") pod \"nova-api-8a87-account-create-kq8md\" (UID: \"3a07bbfa-86c4-40b0-aebd-684592d41663\") " pod="openstack/nova-api-8a87-account-create-kq8md" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.023934 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db6ac827-d728-40ca-bc93-b8f406242a9d-operator-scripts\") pod \"nova-cell0-db-create-wqkdf\" (UID: \"db6ac827-d728-40ca-bc93-b8f406242a9d\") " pod="openstack/nova-cell0-db-create-wqkdf" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.023988 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a07bbfa-86c4-40b0-aebd-684592d41663-operator-scripts\") pod \"nova-api-8a87-account-create-kq8md\" (UID: \"3a07bbfa-86c4-40b0-aebd-684592d41663\") " pod="openstack/nova-api-8a87-account-create-kq8md" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.024910 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a07bbfa-86c4-40b0-aebd-684592d41663-operator-scripts\") pod \"nova-api-8a87-account-create-kq8md\" (UID: \"3a07bbfa-86c4-40b0-aebd-684592d41663\") " pod="openstack/nova-api-8a87-account-create-kq8md" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.025903 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db6ac827-d728-40ca-bc93-b8f406242a9d-operator-scripts\") pod \"nova-cell0-db-create-wqkdf\" (UID: \"db6ac827-d728-40ca-bc93-b8f406242a9d\") " pod="openstack/nova-cell0-db-create-wqkdf" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.038415 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-857b-account-create-trvmc"] Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.040695 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-857b-account-create-trvmc" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.046192 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2x59h\" (UniqueName: \"kubernetes.io/projected/db6ac827-d728-40ca-bc93-b8f406242a9d-kube-api-access-2x59h\") pod \"nova-cell0-db-create-wqkdf\" (UID: \"db6ac827-d728-40ca-bc93-b8f406242a9d\") " pod="openstack/nova-cell0-db-create-wqkdf" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.048114 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.050269 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gm8vx\" (UniqueName: \"kubernetes.io/projected/3a07bbfa-86c4-40b0-aebd-684592d41663-kube-api-access-gm8vx\") pod \"nova-api-8a87-account-create-kq8md\" (UID: \"3a07bbfa-86c4-40b0-aebd-684592d41663\") " pod="openstack/nova-api-8a87-account-create-kq8md" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.065917 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-857b-account-create-trvmc"] Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.074880 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-hs4vf" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.125305 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pddr9\" (UniqueName: \"kubernetes.io/projected/11d10376-5f07-4a8f-bca5-6ee8172f886f-kube-api-access-pddr9\") pod \"nova-cell0-857b-account-create-trvmc\" (UID: \"11d10376-5f07-4a8f-bca5-6ee8172f886f\") " pod="openstack/nova-cell0-857b-account-create-trvmc" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.125377 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc91f7cc-3f75-46d8-b521-30dc169ab022-operator-scripts\") pod \"nova-cell1-db-create-8wsc6\" (UID: \"cc91f7cc-3f75-46d8-b521-30dc169ab022\") " pod="openstack/nova-cell1-db-create-8wsc6" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.125476 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11d10376-5f07-4a8f-bca5-6ee8172f886f-operator-scripts\") pod \"nova-cell0-857b-account-create-trvmc\" (UID: \"11d10376-5f07-4a8f-bca5-6ee8172f886f\") " pod="openstack/nova-cell0-857b-account-create-trvmc" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.125545 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krp47\" (UniqueName: \"kubernetes.io/projected/cc91f7cc-3f75-46d8-b521-30dc169ab022-kube-api-access-krp47\") pod \"nova-cell1-db-create-8wsc6\" (UID: \"cc91f7cc-3f75-46d8-b521-30dc169ab022\") " pod="openstack/nova-cell1-db-create-8wsc6" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.127659 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc91f7cc-3f75-46d8-b521-30dc169ab022-operator-scripts\") pod \"nova-cell1-db-create-8wsc6\" (UID: \"cc91f7cc-3f75-46d8-b521-30dc169ab022\") " pod="openstack/nova-cell1-db-create-8wsc6" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.143029 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krp47\" (UniqueName: \"kubernetes.io/projected/cc91f7cc-3f75-46d8-b521-30dc169ab022-kube-api-access-krp47\") pod \"nova-cell1-db-create-8wsc6\" (UID: \"cc91f7cc-3f75-46d8-b521-30dc169ab022\") " pod="openstack/nova-cell1-db-create-8wsc6" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.187407 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-wqkdf" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.207485 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-8a87-account-create-kq8md" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.232900 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11d10376-5f07-4a8f-bca5-6ee8172f886f-operator-scripts\") pod \"nova-cell0-857b-account-create-trvmc\" (UID: \"11d10376-5f07-4a8f-bca5-6ee8172f886f\") " pod="openstack/nova-cell0-857b-account-create-trvmc" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.233023 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pddr9\" (UniqueName: \"kubernetes.io/projected/11d10376-5f07-4a8f-bca5-6ee8172f886f-kube-api-access-pddr9\") pod \"nova-cell0-857b-account-create-trvmc\" (UID: \"11d10376-5f07-4a8f-bca5-6ee8172f886f\") " pod="openstack/nova-cell0-857b-account-create-trvmc" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.236261 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11d10376-5f07-4a8f-bca5-6ee8172f886f-operator-scripts\") pod \"nova-cell0-857b-account-create-trvmc\" (UID: \"11d10376-5f07-4a8f-bca5-6ee8172f886f\") " pod="openstack/nova-cell0-857b-account-create-trvmc" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.263502 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pddr9\" (UniqueName: \"kubernetes.io/projected/11d10376-5f07-4a8f-bca5-6ee8172f886f-kube-api-access-pddr9\") pod \"nova-cell0-857b-account-create-trvmc\" (UID: \"11d10376-5f07-4a8f-bca5-6ee8172f886f\") " pod="openstack/nova-cell0-857b-account-create-trvmc" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.280321 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-5ab9-account-create-xpsht"] Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.287730 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5ab9-account-create-xpsht" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.292036 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.318381 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5ab9-account-create-xpsht"] Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.319384 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-8wsc6" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.336520 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/543a3fab-95e8-46c7-b566-b6b394749681-operator-scripts\") pod \"nova-cell1-5ab9-account-create-xpsht\" (UID: \"543a3fab-95e8-46c7-b566-b6b394749681\") " pod="openstack/nova-cell1-5ab9-account-create-xpsht" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.336846 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg6m5\" (UniqueName: \"kubernetes.io/projected/543a3fab-95e8-46c7-b566-b6b394749681-kube-api-access-xg6m5\") pod \"nova-cell1-5ab9-account-create-xpsht\" (UID: \"543a3fab-95e8-46c7-b566-b6b394749681\") " pod="openstack/nova-cell1-5ab9-account-create-xpsht" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.438619 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/543a3fab-95e8-46c7-b566-b6b394749681-operator-scripts\") pod \"nova-cell1-5ab9-account-create-xpsht\" (UID: \"543a3fab-95e8-46c7-b566-b6b394749681\") " pod="openstack/nova-cell1-5ab9-account-create-xpsht" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.439057 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg6m5\" (UniqueName: \"kubernetes.io/projected/543a3fab-95e8-46c7-b566-b6b394749681-kube-api-access-xg6m5\") pod \"nova-cell1-5ab9-account-create-xpsht\" (UID: \"543a3fab-95e8-46c7-b566-b6b394749681\") " pod="openstack/nova-cell1-5ab9-account-create-xpsht" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.440170 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/543a3fab-95e8-46c7-b566-b6b394749681-operator-scripts\") pod \"nova-cell1-5ab9-account-create-xpsht\" (UID: \"543a3fab-95e8-46c7-b566-b6b394749681\") " pod="openstack/nova-cell1-5ab9-account-create-xpsht" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.461190 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xg6m5\" (UniqueName: \"kubernetes.io/projected/543a3fab-95e8-46c7-b566-b6b394749681-kube-api-access-xg6m5\") pod \"nova-cell1-5ab9-account-create-xpsht\" (UID: \"543a3fab-95e8-46c7-b566-b6b394749681\") " pod="openstack/nova-cell1-5ab9-account-create-xpsht" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.486387 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-857b-account-create-trvmc" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.584965 4691 generic.go:334] "Generic (PLEG): container finished" podID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerID="55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005" exitCode=0 Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.585045 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"366a5d80-56a5-4847-9bb6-2e588797c1c7","Type":"ContainerDied","Data":"55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005"} Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.585794 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.585826 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.633777 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.633830 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.637928 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5ab9-account-create-xpsht" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.697637 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.720805 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.794239 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-hs4vf"] Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.837992 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-8wsc6"] Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.888076 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-wqkdf"] Nov 24 08:14:56 crc kubenswrapper[4691]: I1124 08:14:56.903482 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-8a87-account-create-kq8md"] Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.146944 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-857b-account-create-trvmc"] Nov 24 08:14:57 crc kubenswrapper[4691]: W1124 08:14:57.161624 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod11d10376_5f07_4a8f_bca5_6ee8172f886f.slice/crio-1dcf79c0288c2c40534bc72853f3faeb884ce2806d86ec26bd1000fda25729f2 WatchSource:0}: Error finding container 1dcf79c0288c2c40534bc72853f3faeb884ce2806d86ec26bd1000fda25729f2: Status 404 returned error can't find the container with id 1dcf79c0288c2c40534bc72853f3faeb884ce2806d86ec26bd1000fda25729f2 Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.311796 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5ab9-account-create-xpsht"] Nov 24 08:14:57 crc kubenswrapper[4691]: W1124 08:14:57.315047 4691 manager.go:1169] Failed to 
process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod543a3fab_95e8_46c7_b566_b6b394749681.slice/crio-c915b5b097a8cb34783fa772e11418cb35fb7fe3822292ac6890a06ee7f98ca6 WatchSource:0}: Error finding container c915b5b097a8cb34783fa772e11418cb35fb7fe3822292ac6890a06ee7f98ca6: Status 404 returned error can't find the container with id c915b5b097a8cb34783fa772e11418cb35fb7fe3822292ac6890a06ee7f98ca6 Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.627708 4691 generic.go:334] "Generic (PLEG): container finished" podID="3a07bbfa-86c4-40b0-aebd-684592d41663" containerID="8fcb2c412e8404c7f61a375310d5c081a8815d57f772be72737350a68295cc4c" exitCode=0 Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.627809 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8a87-account-create-kq8md" event={"ID":"3a07bbfa-86c4-40b0-aebd-684592d41663","Type":"ContainerDied","Data":"8fcb2c412e8404c7f61a375310d5c081a8815d57f772be72737350a68295cc4c"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.627851 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8a87-account-create-kq8md" event={"ID":"3a07bbfa-86c4-40b0-aebd-684592d41663","Type":"ContainerStarted","Data":"93763dd1b81380fb27c9b8c4d82a002b2a2209d7fc43fbb020b040ecf4002936"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.635533 4691 generic.go:334] "Generic (PLEG): container finished" podID="cc91f7cc-3f75-46d8-b521-30dc169ab022" containerID="cf4449bd40f19dcc5d0aea32234b0bb5d801fb3c2c4718842a799322d0435d7d" exitCode=0 Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.635631 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8wsc6" event={"ID":"cc91f7cc-3f75-46d8-b521-30dc169ab022","Type":"ContainerDied","Data":"cf4449bd40f19dcc5d0aea32234b0bb5d801fb3c2c4718842a799322d0435d7d"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.635670 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8wsc6" event={"ID":"cc91f7cc-3f75-46d8-b521-30dc169ab022","Type":"ContainerStarted","Data":"17ebf420aa65751f87f7cfc944cf93b2f5a16d0e18685f37adf1dde4f5fda59b"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.639577 4691 generic.go:334] "Generic (PLEG): container finished" podID="4782846a-9f12-486c-b56f-137ef67dc92c" containerID="a00c7a3fe3bd72ac333731e2f13812945fca1d80e299ee94e2c1ae2fb4863623" exitCode=0 Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.639657 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hs4vf" event={"ID":"4782846a-9f12-486c-b56f-137ef67dc92c","Type":"ContainerDied","Data":"a00c7a3fe3bd72ac333731e2f13812945fca1d80e299ee94e2c1ae2fb4863623"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.639688 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hs4vf" event={"ID":"4782846a-9f12-486c-b56f-137ef67dc92c","Type":"ContainerStarted","Data":"f1be3ac382463185266018806da03c585888a13a8999d2e4c0f829488fad77d9"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.658833 4691 generic.go:334] "Generic (PLEG): container finished" podID="db6ac827-d728-40ca-bc93-b8f406242a9d" containerID="dc60e967a5d3c1cda82c0f4a142fdfc691252fceb633a7b4604dff5f228d8dae" exitCode=0 Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.659086 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wqkdf" 
event={"ID":"db6ac827-d728-40ca-bc93-b8f406242a9d","Type":"ContainerDied","Data":"dc60e967a5d3c1cda82c0f4a142fdfc691252fceb633a7b4604dff5f228d8dae"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.659116 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wqkdf" event={"ID":"db6ac827-d728-40ca-bc93-b8f406242a9d","Type":"ContainerStarted","Data":"466441ad533d40dcde63e1808a235b7a1ab414e91cf6c3fffcba538b9c7a9713"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.664120 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-857b-account-create-trvmc" event={"ID":"11d10376-5f07-4a8f-bca5-6ee8172f886f","Type":"ContainerStarted","Data":"7acc3b98a4aa8a1697f89b8a44664f41e04b87a6a59c86b14d3319e407aac1e2"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.664157 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-857b-account-create-trvmc" event={"ID":"11d10376-5f07-4a8f-bca5-6ee8172f886f","Type":"ContainerStarted","Data":"1dcf79c0288c2c40534bc72853f3faeb884ce2806d86ec26bd1000fda25729f2"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.667697 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5ab9-account-create-xpsht" event={"ID":"543a3fab-95e8-46c7-b566-b6b394749681","Type":"ContainerStarted","Data":"d517adef1acf08ab50499d32756566d943d9c6d06134e62a3cbeb7c9bf2a5839"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.667760 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5ab9-account-create-xpsht" event={"ID":"543a3fab-95e8-46c7-b566-b6b394749681","Type":"ContainerStarted","Data":"c915b5b097a8cb34783fa772e11418cb35fb7fe3822292ac6890a06ee7f98ca6"} Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.669243 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.669301 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 24 08:14:57 crc kubenswrapper[4691]: I1124 08:14:57.739633 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-5ab9-account-create-xpsht" podStartSLOduration=1.739611686 podStartE2EDuration="1.739611686s" podCreationTimestamp="2025-11-24 08:14:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:14:57.725884918 +0000 UTC m=+1059.724834167" watchObservedRunningTime="2025-11-24 08:14:57.739611686 +0000 UTC m=+1059.738560935" Nov 24 08:14:58 crc kubenswrapper[4691]: I1124 08:14:58.679036 4691 generic.go:334] "Generic (PLEG): container finished" podID="11d10376-5f07-4a8f-bca5-6ee8172f886f" containerID="7acc3b98a4aa8a1697f89b8a44664f41e04b87a6a59c86b14d3319e407aac1e2" exitCode=0 Nov 24 08:14:58 crc kubenswrapper[4691]: I1124 08:14:58.679122 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-857b-account-create-trvmc" event={"ID":"11d10376-5f07-4a8f-bca5-6ee8172f886f","Type":"ContainerDied","Data":"7acc3b98a4aa8a1697f89b8a44664f41e04b87a6a59c86b14d3319e407aac1e2"} Nov 24 08:14:58 crc kubenswrapper[4691]: I1124 08:14:58.726113 4691 generic.go:334] "Generic (PLEG): container finished" podID="543a3fab-95e8-46c7-b566-b6b394749681" containerID="d517adef1acf08ab50499d32756566d943d9c6d06134e62a3cbeb7c9bf2a5839" exitCode=0 Nov 24 08:14:58 crc 
kubenswrapper[4691]: I1124 08:14:58.726670 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5ab9-account-create-xpsht" event={"ID":"543a3fab-95e8-46c7-b566-b6b394749681","Type":"ContainerDied","Data":"d517adef1acf08ab50499d32756566d943d9c6d06134e62a3cbeb7c9bf2a5839"} Nov 24 08:14:58 crc kubenswrapper[4691]: I1124 08:14:58.958609 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 24 08:14:58 crc kubenswrapper[4691]: I1124 08:14:58.959216 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 08:14:58 crc kubenswrapper[4691]: I1124 08:14:58.980269 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.224372 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-857b-account-create-trvmc" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.333607 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pddr9\" (UniqueName: \"kubernetes.io/projected/11d10376-5f07-4a8f-bca5-6ee8172f886f-kube-api-access-pddr9\") pod \"11d10376-5f07-4a8f-bca5-6ee8172f886f\" (UID: \"11d10376-5f07-4a8f-bca5-6ee8172f886f\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.333709 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11d10376-5f07-4a8f-bca5-6ee8172f886f-operator-scripts\") pod \"11d10376-5f07-4a8f-bca5-6ee8172f886f\" (UID: \"11d10376-5f07-4a8f-bca5-6ee8172f886f\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.334474 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11d10376-5f07-4a8f-bca5-6ee8172f886f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "11d10376-5f07-4a8f-bca5-6ee8172f886f" (UID: "11d10376-5f07-4a8f-bca5-6ee8172f886f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.352368 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11d10376-5f07-4a8f-bca5-6ee8172f886f-kube-api-access-pddr9" (OuterVolumeSpecName: "kube-api-access-pddr9") pod "11d10376-5f07-4a8f-bca5-6ee8172f886f" (UID: "11d10376-5f07-4a8f-bca5-6ee8172f886f"). InnerVolumeSpecName "kube-api-access-pddr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.445247 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pddr9\" (UniqueName: \"kubernetes.io/projected/11d10376-5f07-4a8f-bca5-6ee8172f886f-kube-api-access-pddr9\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.445309 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11d10376-5f07-4a8f-bca5-6ee8172f886f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.499536 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-8a87-account-create-kq8md" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.512024 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-wqkdf" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.525365 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hs4vf" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.546309 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a07bbfa-86c4-40b0-aebd-684592d41663-operator-scripts\") pod \"3a07bbfa-86c4-40b0-aebd-684592d41663\" (UID: \"3a07bbfa-86c4-40b0-aebd-684592d41663\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.546382 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2x59h\" (UniqueName: \"kubernetes.io/projected/db6ac827-d728-40ca-bc93-b8f406242a9d-kube-api-access-2x59h\") pod \"db6ac827-d728-40ca-bc93-b8f406242a9d\" (UID: \"db6ac827-d728-40ca-bc93-b8f406242a9d\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.546489 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gm8vx\" (UniqueName: \"kubernetes.io/projected/3a07bbfa-86c4-40b0-aebd-684592d41663-kube-api-access-gm8vx\") pod \"3a07bbfa-86c4-40b0-aebd-684592d41663\" (UID: \"3a07bbfa-86c4-40b0-aebd-684592d41663\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.546625 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db6ac827-d728-40ca-bc93-b8f406242a9d-operator-scripts\") pod \"db6ac827-d728-40ca-bc93-b8f406242a9d\" (UID: \"db6ac827-d728-40ca-bc93-b8f406242a9d\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.547661 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db6ac827-d728-40ca-bc93-b8f406242a9d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "db6ac827-d728-40ca-bc93-b8f406242a9d" (UID: "db6ac827-d728-40ca-bc93-b8f406242a9d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.549208 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a07bbfa-86c4-40b0-aebd-684592d41663-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3a07bbfa-86c4-40b0-aebd-684592d41663" (UID: "3a07bbfa-86c4-40b0-aebd-684592d41663"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.554738 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db6ac827-d728-40ca-bc93-b8f406242a9d-kube-api-access-2x59h" (OuterVolumeSpecName: "kube-api-access-2x59h") pod "db6ac827-d728-40ca-bc93-b8f406242a9d" (UID: "db6ac827-d728-40ca-bc93-b8f406242a9d"). InnerVolumeSpecName "kube-api-access-2x59h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.557730 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-8wsc6" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.567794 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a07bbfa-86c4-40b0-aebd-684592d41663-kube-api-access-gm8vx" (OuterVolumeSpecName: "kube-api-access-gm8vx") pod "3a07bbfa-86c4-40b0-aebd-684592d41663" (UID: "3a07bbfa-86c4-40b0-aebd-684592d41663"). InnerVolumeSpecName "kube-api-access-gm8vx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.650760 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc91f7cc-3f75-46d8-b521-30dc169ab022-operator-scripts\") pod \"cc91f7cc-3f75-46d8-b521-30dc169ab022\" (UID: \"cc91f7cc-3f75-46d8-b521-30dc169ab022\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.651131 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tftnv\" (UniqueName: \"kubernetes.io/projected/4782846a-9f12-486c-b56f-137ef67dc92c-kube-api-access-tftnv\") pod \"4782846a-9f12-486c-b56f-137ef67dc92c\" (UID: \"4782846a-9f12-486c-b56f-137ef67dc92c\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.651244 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc91f7cc-3f75-46d8-b521-30dc169ab022-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cc91f7cc-3f75-46d8-b521-30dc169ab022" (UID: "cc91f7cc-3f75-46d8-b521-30dc169ab022"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.651521 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krp47\" (UniqueName: \"kubernetes.io/projected/cc91f7cc-3f75-46d8-b521-30dc169ab022-kube-api-access-krp47\") pod \"cc91f7cc-3f75-46d8-b521-30dc169ab022\" (UID: \"cc91f7cc-3f75-46d8-b521-30dc169ab022\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.651670 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4782846a-9f12-486c-b56f-137ef67dc92c-operator-scripts\") pod \"4782846a-9f12-486c-b56f-137ef67dc92c\" (UID: \"4782846a-9f12-486c-b56f-137ef67dc92c\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.652341 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc91f7cc-3f75-46d8-b521-30dc169ab022-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.652466 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db6ac827-d728-40ca-bc93-b8f406242a9d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.652534 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a07bbfa-86c4-40b0-aebd-684592d41663-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.652618 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2x59h\" (UniqueName: \"kubernetes.io/projected/db6ac827-d728-40ca-bc93-b8f406242a9d-kube-api-access-2x59h\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc 
kubenswrapper[4691]: I1124 08:14:59.652681 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gm8vx\" (UniqueName: \"kubernetes.io/projected/3a07bbfa-86c4-40b0-aebd-684592d41663-kube-api-access-gm8vx\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.653250 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4782846a-9f12-486c-b56f-137ef67dc92c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4782846a-9f12-486c-b56f-137ef67dc92c" (UID: "4782846a-9f12-486c-b56f-137ef67dc92c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.656619 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc91f7cc-3f75-46d8-b521-30dc169ab022-kube-api-access-krp47" (OuterVolumeSpecName: "kube-api-access-krp47") pod "cc91f7cc-3f75-46d8-b521-30dc169ab022" (UID: "cc91f7cc-3f75-46d8-b521-30dc169ab022"). InnerVolumeSpecName "kube-api-access-krp47". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.656657 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4782846a-9f12-486c-b56f-137ef67dc92c-kube-api-access-tftnv" (OuterVolumeSpecName: "kube-api-access-tftnv") pod "4782846a-9f12-486c-b56f-137ef67dc92c" (UID: "4782846a-9f12-486c-b56f-137ef67dc92c"). InnerVolumeSpecName "kube-api-access-tftnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.673287 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.746910 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hs4vf" event={"ID":"4782846a-9f12-486c-b56f-137ef67dc92c","Type":"ContainerDied","Data":"f1be3ac382463185266018806da03c585888a13a8999d2e4c0f829488fad77d9"} Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.746953 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1be3ac382463185266018806da03c585888a13a8999d2e4c0f829488fad77d9" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.747021 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hs4vf" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.752632 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wqkdf" event={"ID":"db6ac827-d728-40ca-bc93-b8f406242a9d","Type":"ContainerDied","Data":"466441ad533d40dcde63e1808a235b7a1ab414e91cf6c3fffcba538b9c7a9713"} Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.752683 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-wqkdf" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.752695 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="466441ad533d40dcde63e1808a235b7a1ab414e91cf6c3fffcba538b9c7a9713" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.753247 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-tls-certs\") pod \"567ed4cd-aaf3-4e52-be70-2f723075d545\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.754677 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-scripts\") pod \"567ed4cd-aaf3-4e52-be70-2f723075d545\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.754785 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-config-data\") pod \"567ed4cd-aaf3-4e52-be70-2f723075d545\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.755047 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-secret-key\") pod \"567ed4cd-aaf3-4e52-be70-2f723075d545\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.755181 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-combined-ca-bundle\") pod \"567ed4cd-aaf3-4e52-be70-2f723075d545\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.755287 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-npn47\" (UniqueName: \"kubernetes.io/projected/567ed4cd-aaf3-4e52-be70-2f723075d545-kube-api-access-npn47\") pod \"567ed4cd-aaf3-4e52-be70-2f723075d545\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.755359 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/567ed4cd-aaf3-4e52-be70-2f723075d545-logs\") pod \"567ed4cd-aaf3-4e52-be70-2f723075d545\" (UID: \"567ed4cd-aaf3-4e52-be70-2f723075d545\") " Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.755689 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-857b-account-create-trvmc" event={"ID":"11d10376-5f07-4a8f-bca5-6ee8172f886f","Type":"ContainerDied","Data":"1dcf79c0288c2c40534bc72853f3faeb884ce2806d86ec26bd1000fda25729f2"} Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.755727 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1dcf79c0288c2c40534bc72853f3faeb884ce2806d86ec26bd1000fda25729f2" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.755785 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-857b-account-create-trvmc" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.757232 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/567ed4cd-aaf3-4e52-be70-2f723075d545-logs" (OuterVolumeSpecName: "logs") pod "567ed4cd-aaf3-4e52-be70-2f723075d545" (UID: "567ed4cd-aaf3-4e52-be70-2f723075d545"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.757598 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krp47\" (UniqueName: \"kubernetes.io/projected/cc91f7cc-3f75-46d8-b521-30dc169ab022-kube-api-access-krp47\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.758695 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4782846a-9f12-486c-b56f-137ef67dc92c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.758780 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tftnv\" (UniqueName: \"kubernetes.io/projected/4782846a-9f12-486c-b56f-137ef67dc92c-kube-api-access-tftnv\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.762838 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/567ed4cd-aaf3-4e52-be70-2f723075d545-kube-api-access-npn47" (OuterVolumeSpecName: "kube-api-access-npn47") pod "567ed4cd-aaf3-4e52-be70-2f723075d545" (UID: "567ed4cd-aaf3-4e52-be70-2f723075d545"). InnerVolumeSpecName "kube-api-access-npn47". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.765150 4691 generic.go:334] "Generic (PLEG): container finished" podID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerID="3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c" exitCode=137 Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.765214 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77477f4d7b-kclfz" event={"ID":"567ed4cd-aaf3-4e52-be70-2f723075d545","Type":"ContainerDied","Data":"3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c"} Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.765242 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77477f4d7b-kclfz" event={"ID":"567ed4cd-aaf3-4e52-be70-2f723075d545","Type":"ContainerDied","Data":"1445263e4d5ac9005c9d3dc4b4ddbea844bbf4daa7fb8a192568a510ed21da16"} Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.765259 4691 scope.go:117] "RemoveContainer" containerID="6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.765377 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-77477f4d7b-kclfz" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.766039 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "567ed4cd-aaf3-4e52-be70-2f723075d545" (UID: "567ed4cd-aaf3-4e52-be70-2f723075d545"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.773792 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8a87-account-create-kq8md" event={"ID":"3a07bbfa-86c4-40b0-aebd-684592d41663","Type":"ContainerDied","Data":"93763dd1b81380fb27c9b8c4d82a002b2a2209d7fc43fbb020b040ecf4002936"} Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.773837 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93763dd1b81380fb27c9b8c4d82a002b2a2209d7fc43fbb020b040ecf4002936" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.773920 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-8a87-account-create-kq8md" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.780272 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8wsc6" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.783670 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.783701 4691 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.783870 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8wsc6" event={"ID":"cc91f7cc-3f75-46d8-b521-30dc169ab022","Type":"ContainerDied","Data":"17ebf420aa65751f87f7cfc944cf93b2f5a16d0e18685f37adf1dde4f5fda59b"} Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.783916 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="17ebf420aa65751f87f7cfc944cf93b2f5a16d0e18685f37adf1dde4f5fda59b" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.810097 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-config-data" (OuterVolumeSpecName: "config-data") pod "567ed4cd-aaf3-4e52-be70-2f723075d545" (UID: "567ed4cd-aaf3-4e52-be70-2f723075d545"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.827860 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "567ed4cd-aaf3-4e52-be70-2f723075d545" (UID: "567ed4cd-aaf3-4e52-be70-2f723075d545"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.846011 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-scripts" (OuterVolumeSpecName: "scripts") pod "567ed4cd-aaf3-4e52-be70-2f723075d545" (UID: "567ed4cd-aaf3-4e52-be70-2f723075d545"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.862093 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.862125 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/567ed4cd-aaf3-4e52-be70-2f723075d545-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.862136 4691 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.862148 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.862159 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-npn47\" (UniqueName: \"kubernetes.io/projected/567ed4cd-aaf3-4e52-be70-2f723075d545-kube-api-access-npn47\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.862167 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/567ed4cd-aaf3-4e52-be70-2f723075d545-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.878558 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "567ed4cd-aaf3-4e52-be70-2f723075d545" (UID: "567ed4cd-aaf3-4e52-be70-2f723075d545"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:14:59 crc kubenswrapper[4691]: I1124 08:14:59.964115 4691 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/567ed4cd-aaf3-4e52-be70-2f723075d545-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.009147 4691 scope.go:117] "RemoveContainer" containerID="3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.052094 4691 scope.go:117] "RemoveContainer" containerID="6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824" Nov 24 08:15:00 crc kubenswrapper[4691]: E1124 08:15:00.054034 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824\": container with ID starting with 6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824 not found: ID does not exist" containerID="6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.054070 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824"} err="failed to get container status \"6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824\": rpc error: code = NotFound desc = could not find container \"6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824\": container with ID starting with 6e9caea14320e2c5eb5fe2c000a17f511e9ec24ec9db6ea8fb6a9ec8334da824 not found: ID does not exist" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.054092 4691 scope.go:117] "RemoveContainer" containerID="3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c" Nov 24 08:15:00 crc kubenswrapper[4691]: E1124 08:15:00.054380 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c\": container with ID starting with 3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c not found: ID does not exist" containerID="3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.054401 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c"} err="failed to get container status \"3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c\": rpc error: code = NotFound desc = could not find container \"3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c\": container with ID starting with 3572712a1270dccc8927c1bebc186c5962ee6c9011a6844588c0df16ab94736c not found: ID does not exist" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.121662 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-77477f4d7b-kclfz"] Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.137956 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-77477f4d7b-kclfz"] Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.149200 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g"] Nov 24 08:15:00 crc kubenswrapper[4691]: E1124 08:15:00.149629 
4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon-log" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.149651 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon-log" Nov 24 08:15:00 crc kubenswrapper[4691]: E1124 08:15:00.149669 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4782846a-9f12-486c-b56f-137ef67dc92c" containerName="mariadb-database-create" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.149677 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="4782846a-9f12-486c-b56f-137ef67dc92c" containerName="mariadb-database-create" Nov 24 08:15:00 crc kubenswrapper[4691]: E1124 08:15:00.149696 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a07bbfa-86c4-40b0-aebd-684592d41663" containerName="mariadb-account-create" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.149705 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a07bbfa-86c4-40b0-aebd-684592d41663" containerName="mariadb-account-create" Nov 24 08:15:00 crc kubenswrapper[4691]: E1124 08:15:00.149715 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.149722 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon" Nov 24 08:15:00 crc kubenswrapper[4691]: E1124 08:15:00.149740 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc91f7cc-3f75-46d8-b521-30dc169ab022" containerName="mariadb-database-create" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.149748 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc91f7cc-3f75-46d8-b521-30dc169ab022" containerName="mariadb-database-create" Nov 24 08:15:00 crc kubenswrapper[4691]: E1124 08:15:00.149757 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11d10376-5f07-4a8f-bca5-6ee8172f886f" containerName="mariadb-account-create" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.149766 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="11d10376-5f07-4a8f-bca5-6ee8172f886f" containerName="mariadb-account-create" Nov 24 08:15:00 crc kubenswrapper[4691]: E1124 08:15:00.149777 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db6ac827-d728-40ca-bc93-b8f406242a9d" containerName="mariadb-database-create" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.149783 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="db6ac827-d728-40ca-bc93-b8f406242a9d" containerName="mariadb-database-create" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.150023 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.150040 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="db6ac827-d728-40ca-bc93-b8f406242a9d" containerName="mariadb-database-create" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.150049 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="11d10376-5f07-4a8f-bca5-6ee8172f886f" containerName="mariadb-account-create" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.150072 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a07bbfa-86c4-40b0-aebd-684592d41663" 
containerName="mariadb-account-create" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.150090 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc91f7cc-3f75-46d8-b521-30dc169ab022" containerName="mariadb-database-create" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.150100 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="4782846a-9f12-486c-b56f-137ef67dc92c" containerName="mariadb-database-create" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.150111 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" containerName="horizon-log" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.150857 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.156233 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.156507 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.158284 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g"] Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.281047 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-secret-volume\") pod \"collect-profiles-29399535-2ql5g\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.281119 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-config-volume\") pod \"collect-profiles-29399535-2ql5g\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.281249 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9s92\" (UniqueName: \"kubernetes.io/projected/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-kube-api-access-f9s92\") pod \"collect-profiles-29399535-2ql5g\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.289864 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-5ab9-account-create-xpsht" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.299818 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.342209 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.393314 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xg6m5\" (UniqueName: \"kubernetes.io/projected/543a3fab-95e8-46c7-b566-b6b394749681-kube-api-access-xg6m5\") pod \"543a3fab-95e8-46c7-b566-b6b394749681\" (UID: \"543a3fab-95e8-46c7-b566-b6b394749681\") " Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.408342 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/543a3fab-95e8-46c7-b566-b6b394749681-operator-scripts\") pod \"543a3fab-95e8-46c7-b566-b6b394749681\" (UID: \"543a3fab-95e8-46c7-b566-b6b394749681\") " Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.415958 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-secret-volume\") pod \"collect-profiles-29399535-2ql5g\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.416086 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-config-volume\") pod \"collect-profiles-29399535-2ql5g\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.416374 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9s92\" (UniqueName: \"kubernetes.io/projected/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-kube-api-access-f9s92\") pod \"collect-profiles-29399535-2ql5g\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.424106 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/543a3fab-95e8-46c7-b566-b6b394749681-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "543a3fab-95e8-46c7-b566-b6b394749681" (UID: "543a3fab-95e8-46c7-b566-b6b394749681"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.433538 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/543a3fab-95e8-46c7-b566-b6b394749681-kube-api-access-xg6m5" (OuterVolumeSpecName: "kube-api-access-xg6m5") pod "543a3fab-95e8-46c7-b566-b6b394749681" (UID: "543a3fab-95e8-46c7-b566-b6b394749681"). InnerVolumeSpecName "kube-api-access-xg6m5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.437619 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-config-volume\") pod \"collect-profiles-29399535-2ql5g\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.460140 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9s92\" (UniqueName: \"kubernetes.io/projected/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-kube-api-access-f9s92\") pod \"collect-profiles-29399535-2ql5g\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.495503 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-secret-volume\") pod \"collect-profiles-29399535-2ql5g\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.535317 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xg6m5\" (UniqueName: \"kubernetes.io/projected/543a3fab-95e8-46c7-b566-b6b394749681-kube-api-access-xg6m5\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.535896 4691 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/543a3fab-95e8-46c7-b566-b6b394749681-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.581432 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.793047 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="567ed4cd-aaf3-4e52-be70-2f723075d545" path="/var/lib/kubelet/pods/567ed4cd-aaf3-4e52-be70-2f723075d545/volumes" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.839008 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-5ab9-account-create-xpsht" Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.839144 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5ab9-account-create-xpsht" event={"ID":"543a3fab-95e8-46c7-b566-b6b394749681","Type":"ContainerDied","Data":"c915b5b097a8cb34783fa772e11418cb35fb7fe3822292ac6890a06ee7f98ca6"} Nov 24 08:15:00 crc kubenswrapper[4691]: I1124 08:15:00.839197 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c915b5b097a8cb34783fa772e11418cb35fb7fe3822292ac6890a06ee7f98ca6" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.180571 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g"] Nov 24 08:15:01 crc kubenswrapper[4691]: W1124 08:15:01.182042 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd6d9f7af_713b_45b1_89b3_2c82272dc6f5.slice/crio-e7a1fd92e9bfe0634a04dcc70b05dde00c7daa034db623c60c0eb02f4eb75e48 WatchSource:0}: Error finding container e7a1fd92e9bfe0634a04dcc70b05dde00c7daa034db623c60c0eb02f4eb75e48: Status 404 returned error can't find the container with id e7a1fd92e9bfe0634a04dcc70b05dde00c7daa034db623c60c0eb02f4eb75e48 Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.517829 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5bh8t"] Nov 24 08:15:01 crc kubenswrapper[4691]: E1124 08:15:01.518559 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="543a3fab-95e8-46c7-b566-b6b394749681" containerName="mariadb-account-create" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.518575 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="543a3fab-95e8-46c7-b566-b6b394749681" containerName="mariadb-account-create" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.518764 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="543a3fab-95e8-46c7-b566-b6b394749681" containerName="mariadb-account-create" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.519357 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.522354 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.522576 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-s5244" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.523694 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.547077 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5bh8t"] Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.564242 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.564300 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb9ln\" (UniqueName: \"kubernetes.io/projected/35cf516f-ba01-46fe-97d5-36ae7b99f35e-kube-api-access-fb9ln\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.564376 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-scripts\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.564512 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-config-data\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.665728 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-scripts\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.665854 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-config-data\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.665908 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: 
\"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.665938 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb9ln\" (UniqueName: \"kubernetes.io/projected/35cf516f-ba01-46fe-97d5-36ae7b99f35e-kube-api-access-fb9ln\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.675713 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.675836 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-config-data\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.680423 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-scripts\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.708046 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fb9ln\" (UniqueName: \"kubernetes.io/projected/35cf516f-ba01-46fe-97d5-36ae7b99f35e-kube-api-access-fb9ln\") pod \"nova-cell0-conductor-db-sync-5bh8t\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.836249 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.850796 4691 generic.go:334] "Generic (PLEG): container finished" podID="d6d9f7af-713b-45b1-89b3-2c82272dc6f5" containerID="29ec6b3b9920f7a0874cae6941d0d5813d0d341c625e814f84b86cb3714ce5c8" exitCode=0 Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.851058 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" event={"ID":"d6d9f7af-713b-45b1-89b3-2c82272dc6f5","Type":"ContainerDied","Data":"29ec6b3b9920f7a0874cae6941d0d5813d0d341c625e814f84b86cb3714ce5c8"} Nov 24 08:15:01 crc kubenswrapper[4691]: I1124 08:15:01.851211 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" event={"ID":"d6d9f7af-713b-45b1-89b3-2c82272dc6f5","Type":"ContainerStarted","Data":"e7a1fd92e9bfe0634a04dcc70b05dde00c7daa034db623c60c0eb02f4eb75e48"} Nov 24 08:15:02 crc kubenswrapper[4691]: I1124 08:15:02.469044 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5bh8t"] Nov 24 08:15:02 crc kubenswrapper[4691]: I1124 08:15:02.796263 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 24 08:15:02 crc kubenswrapper[4691]: I1124 08:15:02.862771 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5bh8t" event={"ID":"35cf516f-ba01-46fe-97d5-36ae7b99f35e","Type":"ContainerStarted","Data":"50eb01671f5cbbc9e5d2227d86294f4e9c10feeaa828839ce10bfd27c7868f92"} Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.321657 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.509527 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-config-volume\") pod \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.509764 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-secret-volume\") pod \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.509798 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9s92\" (UniqueName: \"kubernetes.io/projected/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-kube-api-access-f9s92\") pod \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\" (UID: \"d6d9f7af-713b-45b1-89b3-2c82272dc6f5\") " Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.511880 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-config-volume" (OuterVolumeSpecName: "config-volume") pod "d6d9f7af-713b-45b1-89b3-2c82272dc6f5" (UID: "d6d9f7af-713b-45b1-89b3-2c82272dc6f5"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.533675 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d6d9f7af-713b-45b1-89b3-2c82272dc6f5" (UID: "d6d9f7af-713b-45b1-89b3-2c82272dc6f5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.533736 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-kube-api-access-f9s92" (OuterVolumeSpecName: "kube-api-access-f9s92") pod "d6d9f7af-713b-45b1-89b3-2c82272dc6f5" (UID: "d6d9f7af-713b-45b1-89b3-2c82272dc6f5"). InnerVolumeSpecName "kube-api-access-f9s92". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.611944 4691 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.612411 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9s92\" (UniqueName: \"kubernetes.io/projected/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-kube-api-access-f9s92\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.612422 4691 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6d9f7af-713b-45b1-89b3-2c82272dc6f5-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.876414 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" event={"ID":"d6d9f7af-713b-45b1-89b3-2c82272dc6f5","Type":"ContainerDied","Data":"e7a1fd92e9bfe0634a04dcc70b05dde00c7daa034db623c60c0eb02f4eb75e48"} Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.876489 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7a1fd92e9bfe0634a04dcc70b05dde00c7daa034db623c60c0eb02f4eb75e48" Nov 24 08:15:03 crc kubenswrapper[4691]: I1124 08:15:03.876725 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g" Nov 24 08:15:09 crc kubenswrapper[4691]: I1124 08:15:09.938641 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5bh8t" event={"ID":"35cf516f-ba01-46fe-97d5-36ae7b99f35e","Type":"ContainerStarted","Data":"e3d6efccbbbeabf8dda8a717ad37242b942404986a4e0a65be4fb4ec8119fcb9"} Nov 24 08:15:09 crc kubenswrapper[4691]: I1124 08:15:09.963652 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-5bh8t" podStartSLOduration=1.793570406 podStartE2EDuration="8.963623981s" podCreationTimestamp="2025-11-24 08:15:01 +0000 UTC" firstStartedPulling="2025-11-24 08:15:02.493657034 +0000 UTC m=+1064.492606283" lastFinishedPulling="2025-11-24 08:15:09.663710609 +0000 UTC m=+1071.662659858" observedRunningTime="2025-11-24 08:15:09.955356241 +0000 UTC m=+1071.954305500" watchObservedRunningTime="2025-11-24 08:15:09.963623981 +0000 UTC m=+1071.962573230" Nov 24 08:15:15 crc kubenswrapper[4691]: I1124 08:15:15.706012 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 24 08:15:20 crc kubenswrapper[4691]: I1124 08:15:20.945296 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.019677 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-scripts\") pod \"366a5d80-56a5-4847-9bb6-2e588797c1c7\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.019849 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nfdl\" (UniqueName: \"kubernetes.io/projected/366a5d80-56a5-4847-9bb6-2e588797c1c7-kube-api-access-8nfdl\") pod \"366a5d80-56a5-4847-9bb6-2e588797c1c7\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.020053 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-log-httpd\") pod \"366a5d80-56a5-4847-9bb6-2e588797c1c7\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.020093 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-run-httpd\") pod \"366a5d80-56a5-4847-9bb6-2e588797c1c7\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.020128 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-sg-core-conf-yaml\") pod \"366a5d80-56a5-4847-9bb6-2e588797c1c7\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.020198 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-combined-ca-bundle\") pod 
\"366a5d80-56a5-4847-9bb6-2e588797c1c7\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.020261 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-config-data\") pod \"366a5d80-56a5-4847-9bb6-2e588797c1c7\" (UID: \"366a5d80-56a5-4847-9bb6-2e588797c1c7\") " Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.021256 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "366a5d80-56a5-4847-9bb6-2e588797c1c7" (UID: "366a5d80-56a5-4847-9bb6-2e588797c1c7"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.021700 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "366a5d80-56a5-4847-9bb6-2e588797c1c7" (UID: "366a5d80-56a5-4847-9bb6-2e588797c1c7"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.028177 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/366a5d80-56a5-4847-9bb6-2e588797c1c7-kube-api-access-8nfdl" (OuterVolumeSpecName: "kube-api-access-8nfdl") pod "366a5d80-56a5-4847-9bb6-2e588797c1c7" (UID: "366a5d80-56a5-4847-9bb6-2e588797c1c7"). InnerVolumeSpecName "kube-api-access-8nfdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.052742 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-scripts" (OuterVolumeSpecName: "scripts") pod "366a5d80-56a5-4847-9bb6-2e588797c1c7" (UID: "366a5d80-56a5-4847-9bb6-2e588797c1c7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.056004 4691 generic.go:334] "Generic (PLEG): container finished" podID="35cf516f-ba01-46fe-97d5-36ae7b99f35e" containerID="e3d6efccbbbeabf8dda8a717ad37242b942404986a4e0a65be4fb4ec8119fcb9" exitCode=0 Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.056108 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5bh8t" event={"ID":"35cf516f-ba01-46fe-97d5-36ae7b99f35e","Type":"ContainerDied","Data":"e3d6efccbbbeabf8dda8a717ad37242b942404986a4e0a65be4fb4ec8119fcb9"} Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.057901 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "366a5d80-56a5-4847-9bb6-2e588797c1c7" (UID: "366a5d80-56a5-4847-9bb6-2e588797c1c7"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.080743 4691 generic.go:334] "Generic (PLEG): container finished" podID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerID="6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e" exitCode=137 Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.080791 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"366a5d80-56a5-4847-9bb6-2e588797c1c7","Type":"ContainerDied","Data":"6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e"} Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.080819 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"366a5d80-56a5-4847-9bb6-2e588797c1c7","Type":"ContainerDied","Data":"499bcbe2fc20cfd01608130e2afccb058228206971422ae1ae51eab8a5a4a4d1"} Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.080838 4691 scope.go:117] "RemoveContainer" containerID="6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.080995 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.122746 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8nfdl\" (UniqueName: \"kubernetes.io/projected/366a5d80-56a5-4847-9bb6-2e588797c1c7-kube-api-access-8nfdl\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.122786 4691 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.122798 4691 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/366a5d80-56a5-4847-9bb6-2e588797c1c7-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.122810 4691 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.122822 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.131425 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "366a5d80-56a5-4847-9bb6-2e588797c1c7" (UID: "366a5d80-56a5-4847-9bb6-2e588797c1c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.167522 4691 scope.go:117] "RemoveContainer" containerID="ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.169403 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-config-data" (OuterVolumeSpecName: "config-data") pod "366a5d80-56a5-4847-9bb6-2e588797c1c7" (UID: "366a5d80-56a5-4847-9bb6-2e588797c1c7"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.187949 4691 scope.go:117] "RemoveContainer" containerID="f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.215272 4691 scope.go:117] "RemoveContainer" containerID="55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.225034 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.225066 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/366a5d80-56a5-4847-9bb6-2e588797c1c7-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.243275 4691 scope.go:117] "RemoveContainer" containerID="6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e" Nov 24 08:15:21 crc kubenswrapper[4691]: E1124 08:15:21.243898 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e\": container with ID starting with 6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e not found: ID does not exist" containerID="6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.243965 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e"} err="failed to get container status \"6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e\": rpc error: code = NotFound desc = could not find container \"6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e\": container with ID starting with 6e814cdb7c9f4c419bd886e77f7f6d194c512bea3a24dc4468bc8ce80ca8122e not found: ID does not exist" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.243999 4691 scope.go:117] "RemoveContainer" containerID="ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb" Nov 24 08:15:21 crc kubenswrapper[4691]: E1124 08:15:21.244635 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb\": container with ID starting with ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb not found: ID does not exist" containerID="ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.244671 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb"} err="failed to get container status \"ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb\": rpc error: code = NotFound desc = could not find container \"ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb\": container with ID starting with ba81a9ed52a6c4097253f3cb0dbc726c298d6822688c600b061dbd4869e5d8cb not found: ID does not exist" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.244698 4691 scope.go:117] "RemoveContainer" 
containerID="f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c" Nov 24 08:15:21 crc kubenswrapper[4691]: E1124 08:15:21.245051 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c\": container with ID starting with f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c not found: ID does not exist" containerID="f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.245094 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c"} err="failed to get container status \"f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c\": rpc error: code = NotFound desc = could not find container \"f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c\": container with ID starting with f9df67644fdbe22e920db3264c8753bda7a248d4c56df984da41ff3073911f9c not found: ID does not exist" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.245109 4691 scope.go:117] "RemoveContainer" containerID="55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005" Nov 24 08:15:21 crc kubenswrapper[4691]: E1124 08:15:21.245733 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005\": container with ID starting with 55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005 not found: ID does not exist" containerID="55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.245780 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005"} err="failed to get container status \"55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005\": rpc error: code = NotFound desc = could not find container \"55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005\": container with ID starting with 55611b5ff45330cd7521224586567f6e788cef5688e0fa08a592dd36c682b005 not found: ID does not exist" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.417531 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.433292 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.444769 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:15:21 crc kubenswrapper[4691]: E1124 08:15:21.445301 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="sg-core" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.445323 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="sg-core" Nov 24 08:15:21 crc kubenswrapper[4691]: E1124 08:15:21.445347 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="proxy-httpd" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.445358 4691 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="proxy-httpd" Nov 24 08:15:21 crc kubenswrapper[4691]: E1124 08:15:21.445374 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="ceilometer-notification-agent" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.445383 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="ceilometer-notification-agent" Nov 24 08:15:21 crc kubenswrapper[4691]: E1124 08:15:21.445399 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6d9f7af-713b-45b1-89b3-2c82272dc6f5" containerName="collect-profiles" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.445406 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6d9f7af-713b-45b1-89b3-2c82272dc6f5" containerName="collect-profiles" Nov 24 08:15:21 crc kubenswrapper[4691]: E1124 08:15:21.445436 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="ceilometer-central-agent" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.445464 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="ceilometer-central-agent" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.445698 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="proxy-httpd" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.445726 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6d9f7af-713b-45b1-89b3-2c82272dc6f5" containerName="collect-profiles" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.445778 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="ceilometer-central-agent" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.445815 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="ceilometer-notification-agent" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.445838 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" containerName="sg-core" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.448129 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.453392 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.453644 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.482760 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.531421 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-log-httpd\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.531583 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-scripts\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.531613 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.531718 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2v5b\" (UniqueName: \"kubernetes.io/projected/30afd527-ea6d-41a6-8f4d-8d60a2933f01-kube-api-access-m2v5b\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.531770 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-config-data\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.531864 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.531888 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-run-httpd\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.633111 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 
08:15:21.633169 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-run-httpd\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.633248 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-log-httpd\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.633283 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-scripts\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.633304 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.633389 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2v5b\" (UniqueName: \"kubernetes.io/projected/30afd527-ea6d-41a6-8f4d-8d60a2933f01-kube-api-access-m2v5b\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.633432 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-config-data\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.634228 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-run-httpd\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.634407 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-log-httpd\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.638559 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.638569 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.638949 4691 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-config-data\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.639801 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-scripts\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.652147 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2v5b\" (UniqueName: \"kubernetes.io/projected/30afd527-ea6d-41a6-8f4d-8d60a2933f01-kube-api-access-m2v5b\") pod \"ceilometer-0\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " pod="openstack/ceilometer-0" Nov 24 08:15:21 crc kubenswrapper[4691]: I1124 08:15:21.771719 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.255782 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:15:22 crc kubenswrapper[4691]: W1124 08:15:22.280078 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30afd527_ea6d_41a6_8f4d_8d60a2933f01.slice/crio-f7143dc52f074870823fdf41e955ea2962ec69e0ee1e089df5ca8a197ff2e1bc WatchSource:0}: Error finding container f7143dc52f074870823fdf41e955ea2962ec69e0ee1e089df5ca8a197ff2e1bc: Status 404 returned error can't find the container with id f7143dc52f074870823fdf41e955ea2962ec69e0ee1e089df5ca8a197ff2e1bc Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.385682 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.449214 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-combined-ca-bundle\") pod \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.449827 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-scripts\") pod \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.449864 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fb9ln\" (UniqueName: \"kubernetes.io/projected/35cf516f-ba01-46fe-97d5-36ae7b99f35e-kube-api-access-fb9ln\") pod \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.450797 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-config-data\") pod \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\" (UID: \"35cf516f-ba01-46fe-97d5-36ae7b99f35e\") " Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.456134 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-scripts" (OuterVolumeSpecName: "scripts") pod "35cf516f-ba01-46fe-97d5-36ae7b99f35e" (UID: "35cf516f-ba01-46fe-97d5-36ae7b99f35e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.457302 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35cf516f-ba01-46fe-97d5-36ae7b99f35e-kube-api-access-fb9ln" (OuterVolumeSpecName: "kube-api-access-fb9ln") pod "35cf516f-ba01-46fe-97d5-36ae7b99f35e" (UID: "35cf516f-ba01-46fe-97d5-36ae7b99f35e"). InnerVolumeSpecName "kube-api-access-fb9ln". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.478962 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-config-data" (OuterVolumeSpecName: "config-data") pod "35cf516f-ba01-46fe-97d5-36ae7b99f35e" (UID: "35cf516f-ba01-46fe-97d5-36ae7b99f35e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.484003 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "35cf516f-ba01-46fe-97d5-36ae7b99f35e" (UID: "35cf516f-ba01-46fe-97d5-36ae7b99f35e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.552387 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.552421 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.552430 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fb9ln\" (UniqueName: \"kubernetes.io/projected/35cf516f-ba01-46fe-97d5-36ae7b99f35e-kube-api-access-fb9ln\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.552440 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35cf516f-ba01-46fe-97d5-36ae7b99f35e-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:22 crc kubenswrapper[4691]: I1124 08:15:22.774280 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="366a5d80-56a5-4847-9bb6-2e588797c1c7" path="/var/lib/kubelet/pods/366a5d80-56a5-4847-9bb6-2e588797c1c7/volumes" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.113185 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5bh8t" event={"ID":"35cf516f-ba01-46fe-97d5-36ae7b99f35e","Type":"ContainerDied","Data":"50eb01671f5cbbc9e5d2227d86294f4e9c10feeaa828839ce10bfd27c7868f92"} Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.113484 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50eb01671f5cbbc9e5d2227d86294f4e9c10feeaa828839ce10bfd27c7868f92" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.113570 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5bh8t" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.117370 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30afd527-ea6d-41a6-8f4d-8d60a2933f01","Type":"ContainerStarted","Data":"d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2"} Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.117430 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30afd527-ea6d-41a6-8f4d-8d60a2933f01","Type":"ContainerStarted","Data":"f7143dc52f074870823fdf41e955ea2962ec69e0ee1e089df5ca8a197ff2e1bc"} Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.180006 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 24 08:15:23 crc kubenswrapper[4691]: E1124 08:15:23.180438 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35cf516f-ba01-46fe-97d5-36ae7b99f35e" containerName="nova-cell0-conductor-db-sync" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.180472 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="35cf516f-ba01-46fe-97d5-36ae7b99f35e" containerName="nova-cell0-conductor-db-sync" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.180654 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="35cf516f-ba01-46fe-97d5-36ae7b99f35e" containerName="nova-cell0-conductor-db-sync" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.181236 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.184146 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-s5244" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.197612 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.204331 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.268721 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l24ns\" (UniqueName: \"kubernetes.io/projected/80ba081a-de68-4111-8dd6-ec207b574dee-kube-api-access-l24ns\") pod \"nova-cell0-conductor-0\" (UID: \"80ba081a-de68-4111-8dd6-ec207b574dee\") " pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.268867 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80ba081a-de68-4111-8dd6-ec207b574dee-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"80ba081a-de68-4111-8dd6-ec207b574dee\") " pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.268902 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80ba081a-de68-4111-8dd6-ec207b574dee-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"80ba081a-de68-4111-8dd6-ec207b574dee\") " pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.369867 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/80ba081a-de68-4111-8dd6-ec207b574dee-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"80ba081a-de68-4111-8dd6-ec207b574dee\") " pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.369918 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80ba081a-de68-4111-8dd6-ec207b574dee-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"80ba081a-de68-4111-8dd6-ec207b574dee\") " pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.369988 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l24ns\" (UniqueName: \"kubernetes.io/projected/80ba081a-de68-4111-8dd6-ec207b574dee-kube-api-access-l24ns\") pod \"nova-cell0-conductor-0\" (UID: \"80ba081a-de68-4111-8dd6-ec207b574dee\") " pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.374917 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80ba081a-de68-4111-8dd6-ec207b574dee-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"80ba081a-de68-4111-8dd6-ec207b574dee\") " pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.381212 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80ba081a-de68-4111-8dd6-ec207b574dee-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"80ba081a-de68-4111-8dd6-ec207b574dee\") " pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.399778 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l24ns\" (UniqueName: \"kubernetes.io/projected/80ba081a-de68-4111-8dd6-ec207b574dee-kube-api-access-l24ns\") pod \"nova-cell0-conductor-0\" (UID: \"80ba081a-de68-4111-8dd6-ec207b574dee\") " pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:23 crc kubenswrapper[4691]: I1124 08:15:23.498005 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:24 crc kubenswrapper[4691]: I1124 08:15:24.023638 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 24 08:15:24 crc kubenswrapper[4691]: W1124 08:15:24.029649 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod80ba081a_de68_4111_8dd6_ec207b574dee.slice/crio-50bca7305183efe1f4e3088d9ae400d6faa3dd06c8f1e41bb9c76bc9039a129e WatchSource:0}: Error finding container 50bca7305183efe1f4e3088d9ae400d6faa3dd06c8f1e41bb9c76bc9039a129e: Status 404 returned error can't find the container with id 50bca7305183efe1f4e3088d9ae400d6faa3dd06c8f1e41bb9c76bc9039a129e Nov 24 08:15:24 crc kubenswrapper[4691]: I1124 08:15:24.132495 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30afd527-ea6d-41a6-8f4d-8d60a2933f01","Type":"ContainerStarted","Data":"ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb"} Nov 24 08:15:24 crc kubenswrapper[4691]: I1124 08:15:24.134306 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"80ba081a-de68-4111-8dd6-ec207b574dee","Type":"ContainerStarted","Data":"50bca7305183efe1f4e3088d9ae400d6faa3dd06c8f1e41bb9c76bc9039a129e"} Nov 24 08:15:25 crc kubenswrapper[4691]: I1124 08:15:25.144594 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"80ba081a-de68-4111-8dd6-ec207b574dee","Type":"ContainerStarted","Data":"377b17455ea657d9f8b0c8e6a8ed21ec36804df18897d694c4f6eb39aba0f0fc"} Nov 24 08:15:25 crc kubenswrapper[4691]: I1124 08:15:25.145346 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:25 crc kubenswrapper[4691]: I1124 08:15:25.147409 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30afd527-ea6d-41a6-8f4d-8d60a2933f01","Type":"ContainerStarted","Data":"2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57"} Nov 24 08:15:25 crc kubenswrapper[4691]: I1124 08:15:25.166569 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.166544087 podStartE2EDuration="2.166544087s" podCreationTimestamp="2025-11-24 08:15:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:15:25.1597735 +0000 UTC m=+1087.158722759" watchObservedRunningTime="2025-11-24 08:15:25.166544087 +0000 UTC m=+1087.165493336" Nov 24 08:15:27 crc kubenswrapper[4691]: I1124 08:15:27.168544 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30afd527-ea6d-41a6-8f4d-8d60a2933f01","Type":"ContainerStarted","Data":"d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4"} Nov 24 08:15:27 crc kubenswrapper[4691]: I1124 08:15:27.169139 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 08:15:27 crc kubenswrapper[4691]: I1124 08:15:27.204322 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.293521014 podStartE2EDuration="6.204286822s" podCreationTimestamp="2025-11-24 08:15:21 +0000 UTC" firstStartedPulling="2025-11-24 08:15:22.288227909 +0000 UTC m=+1084.287177158" 
lastFinishedPulling="2025-11-24 08:15:26.198993717 +0000 UTC m=+1088.197942966" observedRunningTime="2025-11-24 08:15:27.19593944 +0000 UTC m=+1089.194888689" watchObservedRunningTime="2025-11-24 08:15:27.204286822 +0000 UTC m=+1089.203236091" Nov 24 08:15:33 crc kubenswrapper[4691]: I1124 08:15:33.543214 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.106543 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-kkxd2"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.108162 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.112055 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.112519 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.120660 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-kkxd2"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.282578 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmpl4\" (UniqueName: \"kubernetes.io/projected/f83a1f39-5338-46a1-96b8-384c34957916-kube-api-access-rmpl4\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.282647 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-scripts\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.282704 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-config-data\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.282781 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.347745 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.349426 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.358551 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.384982 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.385059 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmpl4\" (UniqueName: \"kubernetes.io/projected/f83a1f39-5338-46a1-96b8-384c34957916-kube-api-access-rmpl4\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.385088 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-scripts\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.385141 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-config-data\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.386424 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.401281 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-config-data\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.401308 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-scripts\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.401392 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.402397 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.406594 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.440131 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmpl4\" (UniqueName: \"kubernetes.io/projected/f83a1f39-5338-46a1-96b8-384c34957916-kube-api-access-rmpl4\") pod \"nova-cell0-cell-mapping-kkxd2\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.472534 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.494353 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.518738 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z45vn\" (UniqueName: \"kubernetes.io/projected/fc2141ee-cbda-4296-a4a5-57d3725c2688-kube-api-access-z45vn\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.519261 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.519417 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-config-data\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.519710 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbdvz\" (UniqueName: \"kubernetes.io/projected/e2c432bd-9833-4e09-9253-46e11eb26503-kube-api-access-wbdvz\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.520011 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc2141ee-cbda-4296-a4a5-57d3725c2688-logs\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.520168 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.520294 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2c432bd-9833-4e09-9253-46e11eb26503-logs\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" 
Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.521022 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-config-data\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.541419 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.541705 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.554980 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.610664 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.622318 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc2141ee-cbda-4296-a4a5-57d3725c2688-logs\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.622374 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.622394 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2c432bd-9833-4e09-9253-46e11eb26503-logs\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.622416 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-config-data\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.622474 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-config-data\") pod \"nova-scheduler-0\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.622529 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z45vn\" (UniqueName: \"kubernetes.io/projected/fc2141ee-cbda-4296-a4a5-57d3725c2688-kube-api-access-z45vn\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.622549 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:34 crc 
kubenswrapper[4691]: I1124 08:15:34.622574 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.622594 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86flb\" (UniqueName: \"kubernetes.io/projected/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-kube-api-access-86flb\") pod \"nova-scheduler-0\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.622614 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-config-data\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.622650 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbdvz\" (UniqueName: \"kubernetes.io/projected/e2c432bd-9833-4e09-9253-46e11eb26503-kube-api-access-wbdvz\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.623507 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2c432bd-9833-4e09-9253-46e11eb26503-logs\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.626937 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc2141ee-cbda-4296-a4a5-57d3725c2688-logs\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.627319 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.633328 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-config-data\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.638966 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-config-data\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.639047 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.641008 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.642718 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbdvz\" (UniqueName: \"kubernetes.io/projected/e2c432bd-9833-4e09-9253-46e11eb26503-kube-api-access-wbdvz\") pod \"nova-api-0\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.643433 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.645693 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z45vn\" (UniqueName: \"kubernetes.io/projected/fc2141ee-cbda-4296-a4a5-57d3725c2688-kube-api-access-z45vn\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.647932 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.665745 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.677580 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mwltv"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.678860 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.679821 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.685349 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mwltv"] Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.726003 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-config-data\") pod \"nova-scheduler-0\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.726614 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.727017 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.727069 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86flb\" (UniqueName: \"kubernetes.io/projected/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-kube-api-access-86flb\") pod \"nova-scheduler-0\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.734255 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-config-data\") pod \"nova-scheduler-0\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.737991 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.747757 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86flb\" (UniqueName: \"kubernetes.io/projected/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-kube-api-access-86flb\") pod \"nova-scheduler-0\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.831717 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.832184 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.832226 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85ngw\" (UniqueName: \"kubernetes.io/projected/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-kube-api-access-85ngw\") pod \"nova-cell1-novncproxy-0\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.832292 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-svc\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc 
kubenswrapper[4691]: I1124 08:15:34.832345 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fmj8\" (UniqueName: \"kubernetes.io/projected/415032f2-8952-4970-a28e-2c64f5f0206e-kube-api-access-8fmj8\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.832393 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-config\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.832466 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.832489 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.832722 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.857999 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.900242 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.934951 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.935008 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.935028 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.935053 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85ngw\" (UniqueName: \"kubernetes.io/projected/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-kube-api-access-85ngw\") pod \"nova-cell1-novncproxy-0\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.935095 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-svc\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.935132 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fmj8\" (UniqueName: \"kubernetes.io/projected/415032f2-8952-4970-a28e-2c64f5f0206e-kube-api-access-8fmj8\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.935166 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-config\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.935206 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.935225 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc 
kubenswrapper[4691]: I1124 08:15:34.936070 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.936670 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.936736 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.937120 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-svc\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.937884 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-config\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.943014 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.943014 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.955129 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fmj8\" (UniqueName: \"kubernetes.io/projected/415032f2-8952-4970-a28e-2c64f5f0206e-kube-api-access-8fmj8\") pod \"dnsmasq-dns-bccf8f775-mwltv\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:34 crc kubenswrapper[4691]: I1124 08:15:34.955795 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85ngw\" (UniqueName: \"kubernetes.io/projected/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-kube-api-access-85ngw\") pod \"nova-cell1-novncproxy-0\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.166545 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.194262 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.202808 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-kkxd2"] Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.254403 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kkxd2" event={"ID":"f83a1f39-5338-46a1-96b8-384c34957916","Type":"ContainerStarted","Data":"6939168a094c11929eae9a52a20182e5e343c6f20510faff62abbc84f0d5faa0"} Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.269704 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:15:35 crc kubenswrapper[4691]: W1124 08:15:35.282359 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode2c432bd_9833_4e09_9253_46e11eb26503.slice/crio-229ee24775b5ee15e1c3565f7b574350b62c7cbd1102ac5a5455da2d32006f2b WatchSource:0}: Error finding container 229ee24775b5ee15e1c3565f7b574350b62c7cbd1102ac5a5455da2d32006f2b: Status 404 returned error can't find the container with id 229ee24775b5ee15e1c3565f7b574350b62c7cbd1102ac5a5455da2d32006f2b Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.435985 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-t62jq"] Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.438299 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.443923 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.444101 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.446902 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-t62jq"] Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.526305 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.559991 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-config-data\") pod \"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.560095 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.560188 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-scripts\") pod 
\"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.560264 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mgsp\" (UniqueName: \"kubernetes.io/projected/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-kube-api-access-7mgsp\") pod \"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.661946 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-scripts\") pod \"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.663107 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mgsp\" (UniqueName: \"kubernetes.io/projected/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-kube-api-access-7mgsp\") pod \"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.663237 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-config-data\") pod \"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.663541 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.670898 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.670978 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-config-data\") pod \"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.671429 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-scripts\") pod \"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.709213 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mgsp\" (UniqueName: 
\"kubernetes.io/projected/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-kube-api-access-7mgsp\") pod \"nova-cell1-conductor-db-sync-t62jq\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.714566 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.806220 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mwltv"] Nov 24 08:15:35 crc kubenswrapper[4691]: W1124 08:15:35.826675 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25cbad01_f9b2_4243_b6f9_7ba26a2454d4.slice/crio-d072fabc73a5a92bd6212e29bf268d45f9c7c3d7dab4ebd20e5aca431f7ef2f3 WatchSource:0}: Error finding container d072fabc73a5a92bd6212e29bf268d45f9c7c3d7dab4ebd20e5aca431f7ef2f3: Status 404 returned error can't find the container with id d072fabc73a5a92bd6212e29bf268d45f9c7c3d7dab4ebd20e5aca431f7ef2f3 Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.831297 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 08:15:35 crc kubenswrapper[4691]: I1124 08:15:35.900043 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:36 crc kubenswrapper[4691]: I1124 08:15:36.283341 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"25cbad01-f9b2-4243-b6f9-7ba26a2454d4","Type":"ContainerStarted","Data":"d072fabc73a5a92bd6212e29bf268d45f9c7c3d7dab4ebd20e5aca431f7ef2f3"} Nov 24 08:15:36 crc kubenswrapper[4691]: I1124 08:15:36.285958 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fc2141ee-cbda-4296-a4a5-57d3725c2688","Type":"ContainerStarted","Data":"0df6c97a00d0cfb2370cf22e6bf7957a1ac6d141d7f5745711d327728eb55d02"} Nov 24 08:15:36 crc kubenswrapper[4691]: I1124 08:15:36.292243 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" event={"ID":"415032f2-8952-4970-a28e-2c64f5f0206e","Type":"ContainerDied","Data":"112ee9e3b9ab9f771155668e7201b3806c6010c2c47528913e67c5d87d64ce15"} Nov 24 08:15:36 crc kubenswrapper[4691]: I1124 08:15:36.292178 4691 generic.go:334] "Generic (PLEG): container finished" podID="415032f2-8952-4970-a28e-2c64f5f0206e" containerID="112ee9e3b9ab9f771155668e7201b3806c6010c2c47528913e67c5d87d64ce15" exitCode=0 Nov 24 08:15:36 crc kubenswrapper[4691]: I1124 08:15:36.292437 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" event={"ID":"415032f2-8952-4970-a28e-2c64f5f0206e","Type":"ContainerStarted","Data":"f9493f9e7b046f44230d22a0149d7504f622816eab66d90c70e7ac1279d08b95"} Nov 24 08:15:36 crc kubenswrapper[4691]: I1124 08:15:36.295552 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5d3a4115-27a7-4d2a-a968-a04b5e99dd80","Type":"ContainerStarted","Data":"720ec97bea25be284cea46328715b05ee68e13cc9967bee6a14df495ed161e1d"} Nov 24 08:15:36 crc kubenswrapper[4691]: I1124 08:15:36.304837 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kkxd2" event={"ID":"f83a1f39-5338-46a1-96b8-384c34957916","Type":"ContainerStarted","Data":"16e51f596472d04c6f21e3412401f30f340be3f7006a2574d3e2522a7b3fe82c"} Nov 
24 08:15:36 crc kubenswrapper[4691]: I1124 08:15:36.308806 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c432bd-9833-4e09-9253-46e11eb26503","Type":"ContainerStarted","Data":"229ee24775b5ee15e1c3565f7b574350b62c7cbd1102ac5a5455da2d32006f2b"} Nov 24 08:15:36 crc kubenswrapper[4691]: I1124 08:15:36.340254 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-kkxd2" podStartSLOduration=2.34023141 podStartE2EDuration="2.34023141s" podCreationTimestamp="2025-11-24 08:15:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:15:36.334890325 +0000 UTC m=+1098.333839574" watchObservedRunningTime="2025-11-24 08:15:36.34023141 +0000 UTC m=+1098.339180669" Nov 24 08:15:36 crc kubenswrapper[4691]: I1124 08:15:36.489283 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-t62jq"] Nov 24 08:15:37 crc kubenswrapper[4691]: I1124 08:15:37.341896 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" event={"ID":"415032f2-8952-4970-a28e-2c64f5f0206e","Type":"ContainerStarted","Data":"fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b"} Nov 24 08:15:37 crc kubenswrapper[4691]: I1124 08:15:37.343907 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:37 crc kubenswrapper[4691]: I1124 08:15:37.360140 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-t62jq" event={"ID":"3c4c6180-da05-43a3-9f3b-e689e86cb2ac","Type":"ContainerStarted","Data":"c01d57cb5ff8a868761f76d113b805887db7f7ae3f13bedc7eb1cde39b854ee6"} Nov 24 08:15:37 crc kubenswrapper[4691]: I1124 08:15:37.360193 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-t62jq" event={"ID":"3c4c6180-da05-43a3-9f3b-e689e86cb2ac","Type":"ContainerStarted","Data":"f0afe9e0dff7f9b64d4802353af4319d4af961e95f249831990821f585f9d617"} Nov 24 08:15:37 crc kubenswrapper[4691]: I1124 08:15:37.377952 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" podStartSLOduration=3.377927734 podStartE2EDuration="3.377927734s" podCreationTimestamp="2025-11-24 08:15:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:15:37.370164789 +0000 UTC m=+1099.369114038" watchObservedRunningTime="2025-11-24 08:15:37.377927734 +0000 UTC m=+1099.376876983" Nov 24 08:15:37 crc kubenswrapper[4691]: I1124 08:15:37.404217 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-t62jq" podStartSLOduration=2.404193165 podStartE2EDuration="2.404193165s" podCreationTimestamp="2025-11-24 08:15:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:15:37.392437114 +0000 UTC m=+1099.391386363" watchObservedRunningTime="2025-11-24 08:15:37.404193165 +0000 UTC m=+1099.403142414" Nov 24 08:15:38 crc kubenswrapper[4691]: I1124 08:15:38.129185 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 08:15:38 crc kubenswrapper[4691]: I1124 08:15:38.148191 4691 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.410090 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5d3a4115-27a7-4d2a-a968-a04b5e99dd80","Type":"ContainerStarted","Data":"5645154630f771f9a4e8f5c7d0b8b38ab36c5a2059e9154ee5d8bb3037231d13"} Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.427808 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c432bd-9833-4e09-9253-46e11eb26503","Type":"ContainerStarted","Data":"a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577"} Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.427869 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c432bd-9833-4e09-9253-46e11eb26503","Type":"ContainerStarted","Data":"e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1"} Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.447655 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.886102181 podStartE2EDuration="6.447635207s" podCreationTimestamp="2025-11-24 08:15:34 +0000 UTC" firstStartedPulling="2025-11-24 08:15:35.711881011 +0000 UTC m=+1097.710830260" lastFinishedPulling="2025-11-24 08:15:39.273414037 +0000 UTC m=+1101.272363286" observedRunningTime="2025-11-24 08:15:40.443806116 +0000 UTC m=+1102.442755375" watchObservedRunningTime="2025-11-24 08:15:40.447635207 +0000 UTC m=+1102.446584456" Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.453807 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"25cbad01-f9b2-4243-b6f9-7ba26a2454d4","Type":"ContainerStarted","Data":"2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0"} Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.454024 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="25cbad01-f9b2-4243-b6f9-7ba26a2454d4" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0" gracePeriod=30 Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.472010 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fc2141ee-cbda-4296-a4a5-57d3725c2688","Type":"ContainerStarted","Data":"581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be"} Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.472077 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fc2141ee-cbda-4296-a4a5-57d3725c2688","Type":"ContainerStarted","Data":"54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc"} Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.472267 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="fc2141ee-cbda-4296-a4a5-57d3725c2688" containerName="nova-metadata-log" containerID="cri-o://54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc" gracePeriod=30 Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.472590 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="fc2141ee-cbda-4296-a4a5-57d3725c2688" containerName="nova-metadata-metadata" 
containerID="cri-o://581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be" gracePeriod=30 Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.487600 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.4912752879999998 podStartE2EDuration="6.487568714s" podCreationTimestamp="2025-11-24 08:15:34 +0000 UTC" firstStartedPulling="2025-11-24 08:15:35.28502408 +0000 UTC m=+1097.283973329" lastFinishedPulling="2025-11-24 08:15:39.281317506 +0000 UTC m=+1101.280266755" observedRunningTime="2025-11-24 08:15:40.477783791 +0000 UTC m=+1102.476733050" watchObservedRunningTime="2025-11-24 08:15:40.487568714 +0000 UTC m=+1102.486517963" Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.521503 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.78257895 podStartE2EDuration="6.521471997s" podCreationTimestamp="2025-11-24 08:15:34 +0000 UTC" firstStartedPulling="2025-11-24 08:15:35.542262545 +0000 UTC m=+1097.541211794" lastFinishedPulling="2025-11-24 08:15:39.281155592 +0000 UTC m=+1101.280104841" observedRunningTime="2025-11-24 08:15:40.508963754 +0000 UTC m=+1102.507913003" watchObservedRunningTime="2025-11-24 08:15:40.521471997 +0000 UTC m=+1102.520421246" Nov 24 08:15:40 crc kubenswrapper[4691]: I1124 08:15:40.585017 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.132995606 podStartE2EDuration="6.584986088s" podCreationTimestamp="2025-11-24 08:15:34 +0000 UTC" firstStartedPulling="2025-11-24 08:15:35.829142099 +0000 UTC m=+1097.828091348" lastFinishedPulling="2025-11-24 08:15:39.281132541 +0000 UTC m=+1101.280081830" observedRunningTime="2025-11-24 08:15:40.548910812 +0000 UTC m=+1102.547860051" watchObservedRunningTime="2025-11-24 08:15:40.584986088 +0000 UTC m=+1102.583935337" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.439576 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.487442 4691 generic.go:334] "Generic (PLEG): container finished" podID="fc2141ee-cbda-4296-a4a5-57d3725c2688" containerID="581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be" exitCode=0 Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.487526 4691 generic.go:334] "Generic (PLEG): container finished" podID="fc2141ee-cbda-4296-a4a5-57d3725c2688" containerID="54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc" exitCode=143 Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.487550 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fc2141ee-cbda-4296-a4a5-57d3725c2688","Type":"ContainerDied","Data":"581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be"} Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.487611 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fc2141ee-cbda-4296-a4a5-57d3725c2688","Type":"ContainerDied","Data":"54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc"} Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.487626 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fc2141ee-cbda-4296-a4a5-57d3725c2688","Type":"ContainerDied","Data":"0df6c97a00d0cfb2370cf22e6bf7957a1ac6d141d7f5745711d327728eb55d02"} Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.487645 4691 scope.go:117] "RemoveContainer" containerID="581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.488201 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.523063 4691 scope.go:117] "RemoveContainer" containerID="54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.564189 4691 scope.go:117] "RemoveContainer" containerID="581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be" Nov 24 08:15:41 crc kubenswrapper[4691]: E1124 08:15:41.564871 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be\": container with ID starting with 581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be not found: ID does not exist" containerID="581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.564933 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be"} err="failed to get container status \"581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be\": rpc error: code = NotFound desc = could not find container \"581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be\": container with ID starting with 581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be not found: ID does not exist" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.564963 4691 scope.go:117] "RemoveContainer" containerID="54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.565624 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/empty-dir/fc2141ee-cbda-4296-a4a5-57d3725c2688-logs" (OuterVolumeSpecName: "logs") pod "fc2141ee-cbda-4296-a4a5-57d3725c2688" (UID: "fc2141ee-cbda-4296-a4a5-57d3725c2688"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:15:41 crc kubenswrapper[4691]: E1124 08:15:41.565830 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc\": container with ID starting with 54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc not found: ID does not exist" containerID="54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.565880 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc"} err="failed to get container status \"54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc\": rpc error: code = NotFound desc = could not find container \"54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc\": container with ID starting with 54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc not found: ID does not exist" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.565906 4691 scope.go:117] "RemoveContainer" containerID="581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.566097 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc2141ee-cbda-4296-a4a5-57d3725c2688-logs\") pod \"fc2141ee-cbda-4296-a4a5-57d3725c2688\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.566255 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-config-data\") pod \"fc2141ee-cbda-4296-a4a5-57d3725c2688\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.566282 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be"} err="failed to get container status \"581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be\": rpc error: code = NotFound desc = could not find container \"581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be\": container with ID starting with 581dfe0ef52254ad345c93b42a1e294bda5b96d04c7a130eaf193267f7e1f7be not found: ID does not exist" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.566310 4691 scope.go:117] "RemoveContainer" containerID="54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.566356 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-combined-ca-bundle\") pod \"fc2141ee-cbda-4296-a4a5-57d3725c2688\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.567286 4691 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc"} err="failed to get container status \"54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc\": rpc error: code = NotFound desc = could not find container \"54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc\": container with ID starting with 54c72b8b347eff851a4ba851606486ad587558dfd38f9df018b4dd5d0427a8fc not found: ID does not exist" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.567554 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z45vn\" (UniqueName: \"kubernetes.io/projected/fc2141ee-cbda-4296-a4a5-57d3725c2688-kube-api-access-z45vn\") pod \"fc2141ee-cbda-4296-a4a5-57d3725c2688\" (UID: \"fc2141ee-cbda-4296-a4a5-57d3725c2688\") " Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.568295 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc2141ee-cbda-4296-a4a5-57d3725c2688-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.573429 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc2141ee-cbda-4296-a4a5-57d3725c2688-kube-api-access-z45vn" (OuterVolumeSpecName: "kube-api-access-z45vn") pod "fc2141ee-cbda-4296-a4a5-57d3725c2688" (UID: "fc2141ee-cbda-4296-a4a5-57d3725c2688"). InnerVolumeSpecName "kube-api-access-z45vn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.599685 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-config-data" (OuterVolumeSpecName: "config-data") pod "fc2141ee-cbda-4296-a4a5-57d3725c2688" (UID: "fc2141ee-cbda-4296-a4a5-57d3725c2688"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.600715 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fc2141ee-cbda-4296-a4a5-57d3725c2688" (UID: "fc2141ee-cbda-4296-a4a5-57d3725c2688"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.670345 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z45vn\" (UniqueName: \"kubernetes.io/projected/fc2141ee-cbda-4296-a4a5-57d3725c2688-kube-api-access-z45vn\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.670379 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.670389 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc2141ee-cbda-4296-a4a5-57d3725c2688-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.837876 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.847336 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.863601 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:41 crc kubenswrapper[4691]: E1124 08:15:41.864007 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc2141ee-cbda-4296-a4a5-57d3725c2688" containerName="nova-metadata-metadata" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.864026 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc2141ee-cbda-4296-a4a5-57d3725c2688" containerName="nova-metadata-metadata" Nov 24 08:15:41 crc kubenswrapper[4691]: E1124 08:15:41.864068 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc2141ee-cbda-4296-a4a5-57d3725c2688" containerName="nova-metadata-log" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.864075 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc2141ee-cbda-4296-a4a5-57d3725c2688" containerName="nova-metadata-log" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.864260 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc2141ee-cbda-4296-a4a5-57d3725c2688" containerName="nova-metadata-metadata" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.864280 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc2141ee-cbda-4296-a4a5-57d3725c2688" containerName="nova-metadata-log" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.865248 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.868041 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.868955 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.882612 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.977584 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-config-data\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.977754 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.977791 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxrwj\" (UniqueName: \"kubernetes.io/projected/8712043e-e137-427a-b9bf-ac5e1b89304e-kube-api-access-gxrwj\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.977898 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:41 crc kubenswrapper[4691]: I1124 08:15:41.977936 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8712043e-e137-427a-b9bf-ac5e1b89304e-logs\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.080227 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.080300 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxrwj\" (UniqueName: \"kubernetes.io/projected/8712043e-e137-427a-b9bf-ac5e1b89304e-kube-api-access-gxrwj\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.080430 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: 
\"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.080499 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8712043e-e137-427a-b9bf-ac5e1b89304e-logs\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.080623 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-config-data\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.081253 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8712043e-e137-427a-b9bf-ac5e1b89304e-logs\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.085709 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.085944 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-config-data\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.098341 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.098638 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxrwj\" (UniqueName: \"kubernetes.io/projected/8712043e-e137-427a-b9bf-ac5e1b89304e-kube-api-access-gxrwj\") pod \"nova-metadata-0\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.187571 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.679043 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:42 crc kubenswrapper[4691]: W1124 08:15:42.684418 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8712043e_e137_427a_b9bf_ac5e1b89304e.slice/crio-dfef136379c68642c165ad047bb4313a4ebb3d1ddb9c4d4bba6e03143559494a WatchSource:0}: Error finding container dfef136379c68642c165ad047bb4313a4ebb3d1ddb9c4d4bba6e03143559494a: Status 404 returned error can't find the container with id dfef136379c68642c165ad047bb4313a4ebb3d1ddb9c4d4bba6e03143559494a Nov 24 08:15:42 crc kubenswrapper[4691]: I1124 08:15:42.777430 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc2141ee-cbda-4296-a4a5-57d3725c2688" path="/var/lib/kubelet/pods/fc2141ee-cbda-4296-a4a5-57d3725c2688/volumes" Nov 24 08:15:43 crc kubenswrapper[4691]: I1124 08:15:43.514062 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8712043e-e137-427a-b9bf-ac5e1b89304e","Type":"ContainerStarted","Data":"8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5"} Nov 24 08:15:43 crc kubenswrapper[4691]: I1124 08:15:43.514677 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8712043e-e137-427a-b9bf-ac5e1b89304e","Type":"ContainerStarted","Data":"781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8"} Nov 24 08:15:43 crc kubenswrapper[4691]: I1124 08:15:43.514712 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8712043e-e137-427a-b9bf-ac5e1b89304e","Type":"ContainerStarted","Data":"dfef136379c68642c165ad047bb4313a4ebb3d1ddb9c4d4bba6e03143559494a"} Nov 24 08:15:43 crc kubenswrapper[4691]: I1124 08:15:43.546343 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.54632142 podStartE2EDuration="2.54632142s" podCreationTimestamp="2025-11-24 08:15:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:15:43.537908597 +0000 UTC m=+1105.536857866" watchObservedRunningTime="2025-11-24 08:15:43.54632142 +0000 UTC m=+1105.545270669" Nov 24 08:15:44 crc kubenswrapper[4691]: I1124 08:15:44.680245 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 08:15:44 crc kubenswrapper[4691]: I1124 08:15:44.680653 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 08:15:44 crc kubenswrapper[4691]: I1124 08:15:44.901188 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 24 08:15:44 crc kubenswrapper[4691]: I1124 08:15:44.901255 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 24 08:15:44 crc kubenswrapper[4691]: I1124 08:15:44.934835 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.166866 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.195592 4691 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.257409 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-cbv5w"] Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.257662 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" podUID="425954a5-8127-4cbe-879e-ae3124e74ee6" containerName="dnsmasq-dns" containerID="cri-o://7a936e6272c238977cbd010fc647278165b67c42ef208ae2c7d79be1348f9703" gracePeriod=10 Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.548937 4691 generic.go:334] "Generic (PLEG): container finished" podID="425954a5-8127-4cbe-879e-ae3124e74ee6" containerID="7a936e6272c238977cbd010fc647278165b67c42ef208ae2c7d79be1348f9703" exitCode=0 Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.549023 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" event={"ID":"425954a5-8127-4cbe-879e-ae3124e74ee6","Type":"ContainerDied","Data":"7a936e6272c238977cbd010fc647278165b67c42ef208ae2c7d79be1348f9703"} Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.551729 4691 generic.go:334] "Generic (PLEG): container finished" podID="f83a1f39-5338-46a1-96b8-384c34957916" containerID="16e51f596472d04c6f21e3412401f30f340be3f7006a2574d3e2522a7b3fe82c" exitCode=0 Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.551782 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kkxd2" event={"ID":"f83a1f39-5338-46a1-96b8-384c34957916","Type":"ContainerDied","Data":"16e51f596472d04c6f21e3412401f30f340be3f7006a2574d3e2522a7b3fe82c"} Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.554307 4691 generic.go:334] "Generic (PLEG): container finished" podID="3c4c6180-da05-43a3-9f3b-e689e86cb2ac" containerID="c01d57cb5ff8a868761f76d113b805887db7f7ae3f13bedc7eb1cde39b854ee6" exitCode=0 Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.554411 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-t62jq" event={"ID":"3c4c6180-da05-43a3-9f3b-e689e86cb2ac","Type":"ContainerDied","Data":"c01d57cb5ff8a868761f76d113b805887db7f7ae3f13bedc7eb1cde39b854ee6"} Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.623220 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.720768 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e2c432bd-9833-4e09-9253-46e11eb26503" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.190:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.764222 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e2c432bd-9833-4e09-9253-46e11eb26503" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.190:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.799127 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.981626 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-svc\") pod \"425954a5-8127-4cbe-879e-ae3124e74ee6\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.981700 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-nb\") pod \"425954a5-8127-4cbe-879e-ae3124e74ee6\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.981754 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-swift-storage-0\") pod \"425954a5-8127-4cbe-879e-ae3124e74ee6\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.981841 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-config\") pod \"425954a5-8127-4cbe-879e-ae3124e74ee6\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.981918 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-sb\") pod \"425954a5-8127-4cbe-879e-ae3124e74ee6\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.981961 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zqcm\" (UniqueName: \"kubernetes.io/projected/425954a5-8127-4cbe-879e-ae3124e74ee6-kube-api-access-9zqcm\") pod \"425954a5-8127-4cbe-879e-ae3124e74ee6\" (UID: \"425954a5-8127-4cbe-879e-ae3124e74ee6\") " Nov 24 08:15:45 crc kubenswrapper[4691]: I1124 08:15:45.989020 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/425954a5-8127-4cbe-879e-ae3124e74ee6-kube-api-access-9zqcm" (OuterVolumeSpecName: "kube-api-access-9zqcm") pod "425954a5-8127-4cbe-879e-ae3124e74ee6" (UID: "425954a5-8127-4cbe-879e-ae3124e74ee6"). InnerVolumeSpecName "kube-api-access-9zqcm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.033819 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "425954a5-8127-4cbe-879e-ae3124e74ee6" (UID: "425954a5-8127-4cbe-879e-ae3124e74ee6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.034080 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-config" (OuterVolumeSpecName: "config") pod "425954a5-8127-4cbe-879e-ae3124e74ee6" (UID: "425954a5-8127-4cbe-879e-ae3124e74ee6"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.037919 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "425954a5-8127-4cbe-879e-ae3124e74ee6" (UID: "425954a5-8127-4cbe-879e-ae3124e74ee6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.055741 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "425954a5-8127-4cbe-879e-ae3124e74ee6" (UID: "425954a5-8127-4cbe-879e-ae3124e74ee6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.061880 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "425954a5-8127-4cbe-879e-ae3124e74ee6" (UID: "425954a5-8127-4cbe-879e-ae3124e74ee6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.085233 4691 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.085282 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.085294 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.085305 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zqcm\" (UniqueName: \"kubernetes.io/projected/425954a5-8127-4cbe-879e-ae3124e74ee6-kube-api-access-9zqcm\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.085317 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.085327 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/425954a5-8127-4cbe-879e-ae3124e74ee6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.566052 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.566104 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-cbv5w" event={"ID":"425954a5-8127-4cbe-879e-ae3124e74ee6","Type":"ContainerDied","Data":"b3b1644efc91f602b4db044c2ae11ee40e00e853563987e33b7800c728a64685"} Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.566140 4691 scope.go:117] "RemoveContainer" containerID="7a936e6272c238977cbd010fc647278165b67c42ef208ae2c7d79be1348f9703" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.627192 4691 scope.go:117] "RemoveContainer" containerID="d22530161d0a79a36cea83bcd01898027a570e91d88f4f70fc786e6911a37a67" Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.631713 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-cbv5w"] Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.647770 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-cbv5w"] Nov 24 08:15:46 crc kubenswrapper[4691]: I1124 08:15:46.776333 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="425954a5-8127-4cbe-879e-ae3124e74ee6" path="/var/lib/kubelet/pods/425954a5-8127-4cbe-879e-ae3124e74ee6/volumes" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.085810 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.091719 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.188379 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.188441 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.205574 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-combined-ca-bundle\") pod \"f83a1f39-5338-46a1-96b8-384c34957916\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.205643 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-config-data\") pod \"f83a1f39-5338-46a1-96b8-384c34957916\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.205696 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-scripts\") pod \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.205731 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-scripts\") pod \"f83a1f39-5338-46a1-96b8-384c34957916\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.205872 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-config-data\") pod \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.205923 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-combined-ca-bundle\") pod \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.206016 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mgsp\" (UniqueName: \"kubernetes.io/projected/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-kube-api-access-7mgsp\") pod \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\" (UID: \"3c4c6180-da05-43a3-9f3b-e689e86cb2ac\") " Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.206101 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmpl4\" (UniqueName: \"kubernetes.io/projected/f83a1f39-5338-46a1-96b8-384c34957916-kube-api-access-rmpl4\") pod \"f83a1f39-5338-46a1-96b8-384c34957916\" (UID: \"f83a1f39-5338-46a1-96b8-384c34957916\") " Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.229148 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-scripts" (OuterVolumeSpecName: "scripts") pod "f83a1f39-5338-46a1-96b8-384c34957916" (UID: "f83a1f39-5338-46a1-96b8-384c34957916"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.229680 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-kube-api-access-7mgsp" (OuterVolumeSpecName: "kube-api-access-7mgsp") pod "3c4c6180-da05-43a3-9f3b-e689e86cb2ac" (UID: "3c4c6180-da05-43a3-9f3b-e689e86cb2ac"). InnerVolumeSpecName "kube-api-access-7mgsp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.230274 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-scripts" (OuterVolumeSpecName: "scripts") pod "3c4c6180-da05-43a3-9f3b-e689e86cb2ac" (UID: "3c4c6180-da05-43a3-9f3b-e689e86cb2ac"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.238514 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f83a1f39-5338-46a1-96b8-384c34957916-kube-api-access-rmpl4" (OuterVolumeSpecName: "kube-api-access-rmpl4") pod "f83a1f39-5338-46a1-96b8-384c34957916" (UID: "f83a1f39-5338-46a1-96b8-384c34957916"). InnerVolumeSpecName "kube-api-access-rmpl4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.242878 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-config-data" (OuterVolumeSpecName: "config-data") pod "f83a1f39-5338-46a1-96b8-384c34957916" (UID: "f83a1f39-5338-46a1-96b8-384c34957916"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.251173 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f83a1f39-5338-46a1-96b8-384c34957916" (UID: "f83a1f39-5338-46a1-96b8-384c34957916"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.252401 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c4c6180-da05-43a3-9f3b-e689e86cb2ac" (UID: "3c4c6180-da05-43a3-9f3b-e689e86cb2ac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.266312 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-config-data" (OuterVolumeSpecName: "config-data") pod "3c4c6180-da05-43a3-9f3b-e689e86cb2ac" (UID: "3c4c6180-da05-43a3-9f3b-e689e86cb2ac"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.308356 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.308398 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.308412 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mgsp\" (UniqueName: \"kubernetes.io/projected/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-kube-api-access-7mgsp\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.308421 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmpl4\" (UniqueName: \"kubernetes.io/projected/f83a1f39-5338-46a1-96b8-384c34957916-kube-api-access-rmpl4\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.308429 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.308438 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.308459 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c4c6180-da05-43a3-9f3b-e689e86cb2ac-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.308467 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f83a1f39-5338-46a1-96b8-384c34957916-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 
08:15:47.579323 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-t62jq" event={"ID":"3c4c6180-da05-43a3-9f3b-e689e86cb2ac","Type":"ContainerDied","Data":"f0afe9e0dff7f9b64d4802353af4319d4af961e95f249831990821f585f9d617"} Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.579366 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0afe9e0dff7f9b64d4802353af4319d4af961e95f249831990821f585f9d617" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.579442 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-t62jq" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.593419 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kkxd2" event={"ID":"f83a1f39-5338-46a1-96b8-384c34957916","Type":"ContainerDied","Data":"6939168a094c11929eae9a52a20182e5e343c6f20510faff62abbc84f0d5faa0"} Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.594610 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6939168a094c11929eae9a52a20182e5e343c6f20510faff62abbc84f0d5faa0" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.593508 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kkxd2" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.686637 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 24 08:15:47 crc kubenswrapper[4691]: E1124 08:15:47.687086 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f83a1f39-5338-46a1-96b8-384c34957916" containerName="nova-manage" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.687112 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f83a1f39-5338-46a1-96b8-384c34957916" containerName="nova-manage" Nov 24 08:15:47 crc kubenswrapper[4691]: E1124 08:15:47.687139 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="425954a5-8127-4cbe-879e-ae3124e74ee6" containerName="dnsmasq-dns" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.687148 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="425954a5-8127-4cbe-879e-ae3124e74ee6" containerName="dnsmasq-dns" Nov 24 08:15:47 crc kubenswrapper[4691]: E1124 08:15:47.687180 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="425954a5-8127-4cbe-879e-ae3124e74ee6" containerName="init" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.687188 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="425954a5-8127-4cbe-879e-ae3124e74ee6" containerName="init" Nov 24 08:15:47 crc kubenswrapper[4691]: E1124 08:15:47.687203 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c4c6180-da05-43a3-9f3b-e689e86cb2ac" containerName="nova-cell1-conductor-db-sync" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.687213 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c4c6180-da05-43a3-9f3b-e689e86cb2ac" containerName="nova-cell1-conductor-db-sync" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.687470 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="f83a1f39-5338-46a1-96b8-384c34957916" containerName="nova-manage" Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.687495 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c4c6180-da05-43a3-9f3b-e689e86cb2ac" containerName="nova-cell1-conductor-db-sync" 
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.687507 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="425954a5-8127-4cbe-879e-ae3124e74ee6" containerName="dnsmasq-dns"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.690099 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.692851 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.701049 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 24 08:15:47 crc kubenswrapper[4691]: E1124 08:15:47.752934 4691 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c4c6180_da05_43a3_9f3b_e689e86cb2ac.slice/crio-f0afe9e0dff7f9b64d4802353af4319d4af961e95f249831990821f585f9d617\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c4c6180_da05_43a3_9f3b_e689e86cb2ac.slice\": RecentStats: unable to find data in memory cache]"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.794632 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.795533 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e2c432bd-9833-4e09-9253-46e11eb26503" containerName="nova-api-api" containerID="cri-o://a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577" gracePeriod=30
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.795165 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e2c432bd-9833-4e09-9253-46e11eb26503" containerName="nova-api-log" containerID="cri-o://e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1" gracePeriod=30
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.810891 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.811162 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="5d3a4115-27a7-4d2a-a968-a04b5e99dd80" containerName="nova-scheduler-scheduler" containerID="cri-o://5645154630f771f9a4e8f5c7d0b8b38ab36c5a2059e9154ee5d8bb3037231d13" gracePeriod=30
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.819864 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgfzm\" (UniqueName: \"kubernetes.io/projected/70312fff-c511-48b1-a398-331d593ca41f-kube-api-access-kgfzm\") pod \"nova-cell1-conductor-0\" (UID: \"70312fff-c511-48b1-a398-331d593ca41f\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.820043 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70312fff-c511-48b1-a398-331d593ca41f-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"70312fff-c511-48b1-a398-331d593ca41f\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.820095 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70312fff-c511-48b1-a398-331d593ca41f-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"70312fff-c511-48b1-a398-331d593ca41f\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.825363 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.825632 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8712043e-e137-427a-b9bf-ac5e1b89304e" containerName="nova-metadata-log" containerID="cri-o://781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8" gracePeriod=30
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.825783 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8712043e-e137-427a-b9bf-ac5e1b89304e" containerName="nova-metadata-metadata" containerID="cri-o://8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5" gracePeriod=30
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.922064 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgfzm\" (UniqueName: \"kubernetes.io/projected/70312fff-c511-48b1-a398-331d593ca41f-kube-api-access-kgfzm\") pod \"nova-cell1-conductor-0\" (UID: \"70312fff-c511-48b1-a398-331d593ca41f\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.923201 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70312fff-c511-48b1-a398-331d593ca41f-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"70312fff-c511-48b1-a398-331d593ca41f\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.923718 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70312fff-c511-48b1-a398-331d593ca41f-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"70312fff-c511-48b1-a398-331d593ca41f\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.928033 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70312fff-c511-48b1-a398-331d593ca41f-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"70312fff-c511-48b1-a398-331d593ca41f\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.931640 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70312fff-c511-48b1-a398-331d593ca41f-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"70312fff-c511-48b1-a398-331d593ca41f\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 08:15:47 crc kubenswrapper[4691]: I1124 08:15:47.945013 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgfzm\" (UniqueName: \"kubernetes.io/projected/70312fff-c511-48b1-a398-331d593ca41f-kube-api-access-kgfzm\") pod \"nova-cell1-conductor-0\" (UID: \"70312fff-c511-48b1-a398-331d593ca41f\") " pod="openstack/nova-cell1-conductor-0"
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.010583 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.468647 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.535187 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 24 08:15:48 crc kubenswrapper[4691]: W1124 08:15:48.537503 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70312fff_c511_48b1_a398_331d593ca41f.slice/crio-a4c3b5743e53a3b1b941720ab4bec0f4bddd2e5d9053a9158ee2bf6b0a809a4b WatchSource:0}: Error finding container a4c3b5743e53a3b1b941720ab4bec0f4bddd2e5d9053a9158ee2bf6b0a809a4b: Status 404 returned error can't find the container with id a4c3b5743e53a3b1b941720ab4bec0f4bddd2e5d9053a9158ee2bf6b0a809a4b
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.615320 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"70312fff-c511-48b1-a398-331d593ca41f","Type":"ContainerStarted","Data":"a4c3b5743e53a3b1b941720ab4bec0f4bddd2e5d9053a9158ee2bf6b0a809a4b"}
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.617161 4691 generic.go:334] "Generic (PLEG): container finished" podID="e2c432bd-9833-4e09-9253-46e11eb26503" containerID="e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1" exitCode=143
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.617211 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c432bd-9833-4e09-9253-46e11eb26503","Type":"ContainerDied","Data":"e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1"}
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.618781 4691 generic.go:334] "Generic (PLEG): container finished" podID="8712043e-e137-427a-b9bf-ac5e1b89304e" containerID="8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5" exitCode=0
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.618807 4691 generic.go:334] "Generic (PLEG): container finished" podID="8712043e-e137-427a-b9bf-ac5e1b89304e" containerID="781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8" exitCode=143
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.618824 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8712043e-e137-427a-b9bf-ac5e1b89304e","Type":"ContainerDied","Data":"8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5"}
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.618844 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8712043e-e137-427a-b9bf-ac5e1b89304e","Type":"ContainerDied","Data":"781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8"}
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.618857 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8712043e-e137-427a-b9bf-ac5e1b89304e","Type":"ContainerDied","Data":"dfef136379c68642c165ad047bb4313a4ebb3d1ddb9c4d4bba6e03143559494a"}
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.618875 4691 scope.go:117] "RemoveContainer" containerID="8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5"
Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.619014 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.640584 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-config-data\") pod \"8712043e-e137-427a-b9bf-ac5e1b89304e\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.640717 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-nova-metadata-tls-certs\") pod \"8712043e-e137-427a-b9bf-ac5e1b89304e\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.640766 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxrwj\" (UniqueName: \"kubernetes.io/projected/8712043e-e137-427a-b9bf-ac5e1b89304e-kube-api-access-gxrwj\") pod \"8712043e-e137-427a-b9bf-ac5e1b89304e\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.640992 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-combined-ca-bundle\") pod \"8712043e-e137-427a-b9bf-ac5e1b89304e\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.641094 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8712043e-e137-427a-b9bf-ac5e1b89304e-logs\") pod \"8712043e-e137-427a-b9bf-ac5e1b89304e\" (UID: \"8712043e-e137-427a-b9bf-ac5e1b89304e\") " Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.642432 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8712043e-e137-427a-b9bf-ac5e1b89304e-logs" (OuterVolumeSpecName: "logs") pod "8712043e-e137-427a-b9bf-ac5e1b89304e" (UID: "8712043e-e137-427a-b9bf-ac5e1b89304e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.644928 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8712043e-e137-427a-b9bf-ac5e1b89304e-kube-api-access-gxrwj" (OuterVolumeSpecName: "kube-api-access-gxrwj") pod "8712043e-e137-427a-b9bf-ac5e1b89304e" (UID: "8712043e-e137-427a-b9bf-ac5e1b89304e"). InnerVolumeSpecName "kube-api-access-gxrwj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.667018 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-config-data" (OuterVolumeSpecName: "config-data") pod "8712043e-e137-427a-b9bf-ac5e1b89304e" (UID: "8712043e-e137-427a-b9bf-ac5e1b89304e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.672397 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8712043e-e137-427a-b9bf-ac5e1b89304e" (UID: "8712043e-e137-427a-b9bf-ac5e1b89304e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.681763 4691 scope.go:117] "RemoveContainer" containerID="781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.707222 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "8712043e-e137-427a-b9bf-ac5e1b89304e" (UID: "8712043e-e137-427a-b9bf-ac5e1b89304e"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.709581 4691 scope.go:117] "RemoveContainer" containerID="8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5" Nov 24 08:15:48 crc kubenswrapper[4691]: E1124 08:15:48.710144 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5\": container with ID starting with 8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5 not found: ID does not exist" containerID="8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.710211 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5"} err="failed to get container status \"8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5\": rpc error: code = NotFound desc = could not find container \"8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5\": container with ID starting with 8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5 not found: ID does not exist" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.710245 4691 scope.go:117] "RemoveContainer" containerID="781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8" Nov 24 08:15:48 crc kubenswrapper[4691]: E1124 08:15:48.711483 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8\": container with ID starting with 781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8 not found: ID does not exist" containerID="781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.711516 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8"} err="failed to get container status \"781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8\": rpc error: code = NotFound desc = could not find container \"781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8\": container with ID starting with 781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8 not found: ID does not exist" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.711535 4691 scope.go:117] "RemoveContainer" containerID="8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.711811 4691 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5"} err="failed to get container status \"8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5\": rpc error: code = NotFound desc = could not find container \"8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5\": container with ID starting with 8eb0ff060b5df871483b0f662ea830e9f13d5f56c2743eccc33ee29cb03438f5 not found: ID does not exist" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.711841 4691 scope.go:117] "RemoveContainer" containerID="781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.712063 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8"} err="failed to get container status \"781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8\": rpc error: code = NotFound desc = could not find container \"781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8\": container with ID starting with 781f08a3a82ffa396ad91a5b977313358238206c092c6548662f414943c841f8 not found: ID does not exist" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.743704 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.743740 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8712043e-e137-427a-b9bf-ac5e1b89304e-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.743755 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.743770 4691 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8712043e-e137-427a-b9bf-ac5e1b89304e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.743782 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxrwj\" (UniqueName: \"kubernetes.io/projected/8712043e-e137-427a-b9bf-ac5e1b89304e-kube-api-access-gxrwj\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.943586 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:48 crc kubenswrapper[4691]: I1124 08:15:48.973456 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.019726 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:49 crc kubenswrapper[4691]: E1124 08:15:49.021193 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8712043e-e137-427a-b9bf-ac5e1b89304e" containerName="nova-metadata-metadata" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.021217 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8712043e-e137-427a-b9bf-ac5e1b89304e" containerName="nova-metadata-metadata" Nov 24 08:15:49 crc kubenswrapper[4691]: E1124 08:15:49.021262 4691 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="8712043e-e137-427a-b9bf-ac5e1b89304e" containerName="nova-metadata-log" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.021268 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8712043e-e137-427a-b9bf-ac5e1b89304e" containerName="nova-metadata-log" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.053074 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="8712043e-e137-427a-b9bf-ac5e1b89304e" containerName="nova-metadata-metadata" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.053124 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="8712043e-e137-427a-b9bf-ac5e1b89304e" containerName="nova-metadata-log" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.054947 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.061465 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.063199 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.063435 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.161403 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-config-data\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.161490 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.161534 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9549094b-f8c2-4131-89df-b6156a161466-logs\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.161841 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.162081 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvwpr\" (UniqueName: \"kubernetes.io/projected/9549094b-f8c2-4131-89df-b6156a161466-kube-api-access-qvwpr\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.263992 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-combined-ca-bundle\") 
pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.264082 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvwpr\" (UniqueName: \"kubernetes.io/projected/9549094b-f8c2-4131-89df-b6156a161466-kube-api-access-qvwpr\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.264126 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-config-data\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.264156 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.264193 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9549094b-f8c2-4131-89df-b6156a161466-logs\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.264678 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9549094b-f8c2-4131-89df-b6156a161466-logs\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.269799 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.269847 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-config-data\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.269981 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.281596 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvwpr\" (UniqueName: \"kubernetes.io/projected/9549094b-f8c2-4131-89df-b6156a161466-kube-api-access-qvwpr\") pod \"nova-metadata-0\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.383135 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.630039 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"70312fff-c511-48b1-a398-331d593ca41f","Type":"ContainerStarted","Data":"fd2572e7e173fd5d35a9f397aca46c0daa31d0425ccaa48251f3959af9a8e6bc"} Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.630403 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.658299 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.658280404 podStartE2EDuration="2.658280404s" podCreationTimestamp="2025-11-24 08:15:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:15:49.649796258 +0000 UTC m=+1111.648745507" watchObservedRunningTime="2025-11-24 08:15:49.658280404 +0000 UTC m=+1111.657229653" Nov 24 08:15:49 crc kubenswrapper[4691]: I1124 08:15:49.862530 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:15:49 crc kubenswrapper[4691]: E1124 08:15:49.903862 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5645154630f771f9a4e8f5c7d0b8b38ab36c5a2059e9154ee5d8bb3037231d13" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 08:15:49 crc kubenswrapper[4691]: E1124 08:15:49.905055 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5645154630f771f9a4e8f5c7d0b8b38ab36c5a2059e9154ee5d8bb3037231d13" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 08:15:49 crc kubenswrapper[4691]: E1124 08:15:49.906454 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5645154630f771f9a4e8f5c7d0b8b38ab36c5a2059e9154ee5d8bb3037231d13" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 08:15:49 crc kubenswrapper[4691]: E1124 08:15:49.906495 4691 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="5d3a4115-27a7-4d2a-a968-a04b5e99dd80" containerName="nova-scheduler-scheduler" Nov 24 08:15:50 crc kubenswrapper[4691]: I1124 08:15:50.644352 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9549094b-f8c2-4131-89df-b6156a161466","Type":"ContainerStarted","Data":"b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95"} Nov 24 08:15:50 crc kubenswrapper[4691]: I1124 08:15:50.644736 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9549094b-f8c2-4131-89df-b6156a161466","Type":"ContainerStarted","Data":"86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e"} Nov 24 08:15:50 crc kubenswrapper[4691]: I1124 08:15:50.644981 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-metadata-0" event={"ID":"9549094b-f8c2-4131-89df-b6156a161466","Type":"ContainerStarted","Data":"e96be060136a60e824bb1f41525f74375b910551d26a91ed338bc3e171013f52"} Nov 24 08:15:50 crc kubenswrapper[4691]: I1124 08:15:50.774588 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8712043e-e137-427a-b9bf-ac5e1b89304e" path="/var/lib/kubelet/pods/8712043e-e137-427a-b9bf-ac5e1b89304e/volumes" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.089267 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.089355 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.447213 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.473125 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.473103144 podStartE2EDuration="3.473103144s" podCreationTimestamp="2025-11-24 08:15:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:15:50.676292477 +0000 UTC m=+1112.675241726" watchObservedRunningTime="2025-11-24 08:15:51.473103144 +0000 UTC m=+1113.472052393" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.504405 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2c432bd-9833-4e09-9253-46e11eb26503-logs\") pod \"e2c432bd-9833-4e09-9253-46e11eb26503\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.504615 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-config-data\") pod \"e2c432bd-9833-4e09-9253-46e11eb26503\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.504680 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wbdvz\" (UniqueName: \"kubernetes.io/projected/e2c432bd-9833-4e09-9253-46e11eb26503-kube-api-access-wbdvz\") pod \"e2c432bd-9833-4e09-9253-46e11eb26503\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.504749 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-combined-ca-bundle\") pod \"e2c432bd-9833-4e09-9253-46e11eb26503\" (UID: \"e2c432bd-9833-4e09-9253-46e11eb26503\") " Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.505571 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2c432bd-9833-4e09-9253-46e11eb26503-logs" (OuterVolumeSpecName: "logs") 
pod "e2c432bd-9833-4e09-9253-46e11eb26503" (UID: "e2c432bd-9833-4e09-9253-46e11eb26503"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.518530 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2c432bd-9833-4e09-9253-46e11eb26503-kube-api-access-wbdvz" (OuterVolumeSpecName: "kube-api-access-wbdvz") pod "e2c432bd-9833-4e09-9253-46e11eb26503" (UID: "e2c432bd-9833-4e09-9253-46e11eb26503"). InnerVolumeSpecName "kube-api-access-wbdvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.534341 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-config-data" (OuterVolumeSpecName: "config-data") pod "e2c432bd-9833-4e09-9253-46e11eb26503" (UID: "e2c432bd-9833-4e09-9253-46e11eb26503"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.548963 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e2c432bd-9833-4e09-9253-46e11eb26503" (UID: "e2c432bd-9833-4e09-9253-46e11eb26503"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.607567 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2c432bd-9833-4e09-9253-46e11eb26503-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.607613 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.607629 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wbdvz\" (UniqueName: \"kubernetes.io/projected/e2c432bd-9833-4e09-9253-46e11eb26503-kube-api-access-wbdvz\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.607642 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2c432bd-9833-4e09-9253-46e11eb26503-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.659386 4691 generic.go:334] "Generic (PLEG): container finished" podID="5d3a4115-27a7-4d2a-a968-a04b5e99dd80" containerID="5645154630f771f9a4e8f5c7d0b8b38ab36c5a2059e9154ee5d8bb3037231d13" exitCode=0 Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.659484 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5d3a4115-27a7-4d2a-a968-a04b5e99dd80","Type":"ContainerDied","Data":"5645154630f771f9a4e8f5c7d0b8b38ab36c5a2059e9154ee5d8bb3037231d13"} Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.661552 4691 generic.go:334] "Generic (PLEG): container finished" podID="e2c432bd-9833-4e09-9253-46e11eb26503" containerID="a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577" exitCode=0 Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.662546 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.664293 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c432bd-9833-4e09-9253-46e11eb26503","Type":"ContainerDied","Data":"a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577"} Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.664359 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c432bd-9833-4e09-9253-46e11eb26503","Type":"ContainerDied","Data":"229ee24775b5ee15e1c3565f7b574350b62c7cbd1102ac5a5455da2d32006f2b"} Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.665474 4691 scope.go:117] "RemoveContainer" containerID="a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.748861 4691 scope.go:117] "RemoveContainer" containerID="e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.750859 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.764967 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.777649 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 24 08:15:51 crc kubenswrapper[4691]: E1124 08:15:51.778077 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2c432bd-9833-4e09-9253-46e11eb26503" containerName="nova-api-log" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.778100 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2c432bd-9833-4e09-9253-46e11eb26503" containerName="nova-api-log" Nov 24 08:15:51 crc kubenswrapper[4691]: E1124 08:15:51.778128 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2c432bd-9833-4e09-9253-46e11eb26503" containerName="nova-api-api" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.778137 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2c432bd-9833-4e09-9253-46e11eb26503" containerName="nova-api-api" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.778379 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2c432bd-9833-4e09-9253-46e11eb26503" containerName="nova-api-log" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.778403 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2c432bd-9833-4e09-9253-46e11eb26503" containerName="nova-api-api" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.779653 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.783314 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.785306 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.789233 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.817706 4691 scope.go:117] "RemoveContainer" containerID="a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577" Nov 24 08:15:51 crc kubenswrapper[4691]: E1124 08:15:51.818247 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577\": container with ID starting with a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577 not found: ID does not exist" containerID="a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.818280 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577"} err="failed to get container status \"a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577\": rpc error: code = NotFound desc = could not find container \"a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577\": container with ID starting with a51c6d8e7c0182f52d7d7aea02bbb3081c09ffc1fc69423c420170742c6f4577 not found: ID does not exist" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.818308 4691 scope.go:117] "RemoveContainer" containerID="e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1" Nov 24 08:15:51 crc kubenswrapper[4691]: E1124 08:15:51.818537 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1\": container with ID starting with e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1 not found: ID does not exist" containerID="e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.818559 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1"} err="failed to get container status \"e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1\": rpc error: code = NotFound desc = could not find container \"e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1\": container with ID starting with e81167fd4858f96c299e27357eec2548db1e66910dbcd954d4b60f6ab5cb54e1 not found: ID does not exist" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.936096 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mqpk\" (UniqueName: \"kubernetes.io/projected/eadd696b-88c5-4e0c-aa45-3902eca76895-kube-api-access-2mqpk\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.936470 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.936504 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eadd696b-88c5-4e0c-aa45-3902eca76895-logs\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:51 crc kubenswrapper[4691]: I1124 08:15:51.936586 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-config-data\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.012787 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.039936 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mqpk\" (UniqueName: \"kubernetes.io/projected/eadd696b-88c5-4e0c-aa45-3902eca76895-kube-api-access-2mqpk\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.040032 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.040053 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eadd696b-88c5-4e0c-aa45-3902eca76895-logs\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.040130 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-config-data\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.040760 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eadd696b-88c5-4e0c-aa45-3902eca76895-logs\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.044069 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.045329 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-config-data\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:52 crc 
kubenswrapper[4691]: I1124 08:15:52.057337 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mqpk\" (UniqueName: \"kubernetes.io/projected/eadd696b-88c5-4e0c-aa45-3902eca76895-kube-api-access-2mqpk\") pod \"nova-api-0\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") " pod="openstack/nova-api-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.106514 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.141648 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86flb\" (UniqueName: \"kubernetes.io/projected/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-kube-api-access-86flb\") pod \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.141827 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-combined-ca-bundle\") pod \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.141937 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-config-data\") pod \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\" (UID: \"5d3a4115-27a7-4d2a-a968-a04b5e99dd80\") " Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.145116 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-kube-api-access-86flb" (OuterVolumeSpecName: "kube-api-access-86flb") pod "5d3a4115-27a7-4d2a-a968-a04b5e99dd80" (UID: "5d3a4115-27a7-4d2a-a968-a04b5e99dd80"). InnerVolumeSpecName "kube-api-access-86flb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.168645 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-config-data" (OuterVolumeSpecName: "config-data") pod "5d3a4115-27a7-4d2a-a968-a04b5e99dd80" (UID: "5d3a4115-27a7-4d2a-a968-a04b5e99dd80"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.188537 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d3a4115-27a7-4d2a-a968-a04b5e99dd80" (UID: "5d3a4115-27a7-4d2a-a968-a04b5e99dd80"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.244694 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.244726 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.244735 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86flb\" (UniqueName: \"kubernetes.io/projected/5d3a4115-27a7-4d2a-a968-a04b5e99dd80-kube-api-access-86flb\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.606507 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:15:52 crc kubenswrapper[4691]: W1124 08:15:52.618700 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeadd696b_88c5_4e0c_aa45_3902eca76895.slice/crio-490649e57a1f0bb436e92115fd9e2e63af4316aa0d6b550c356b8b90d2fe805d WatchSource:0}: Error finding container 490649e57a1f0bb436e92115fd9e2e63af4316aa0d6b550c356b8b90d2fe805d: Status 404 returned error can't find the container with id 490649e57a1f0bb436e92115fd9e2e63af4316aa0d6b550c356b8b90d2fe805d Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.678729 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.678720 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5d3a4115-27a7-4d2a-a968-a04b5e99dd80","Type":"ContainerDied","Data":"720ec97bea25be284cea46328715b05ee68e13cc9967bee6a14df495ed161e1d"} Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.678882 4691 scope.go:117] "RemoveContainer" containerID="5645154630f771f9a4e8f5c7d0b8b38ab36c5a2059e9154ee5d8bb3037231d13" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.696015 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"eadd696b-88c5-4e0c-aa45-3902eca76895","Type":"ContainerStarted","Data":"490649e57a1f0bb436e92115fd9e2e63af4316aa0d6b550c356b8b90d2fe805d"} Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.754366 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.783029 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2c432bd-9833-4e09-9253-46e11eb26503" path="/var/lib/kubelet/pods/e2c432bd-9833-4e09-9253-46e11eb26503/volumes" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.783644 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.795478 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:15:52 crc kubenswrapper[4691]: E1124 08:15:52.796022 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d3a4115-27a7-4d2a-a968-a04b5e99dd80" containerName="nova-scheduler-scheduler" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.796047 4691 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="5d3a4115-27a7-4d2a-a968-a04b5e99dd80" containerName="nova-scheduler-scheduler" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.796278 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d3a4115-27a7-4d2a-a968-a04b5e99dd80" containerName="nova-scheduler-scheduler" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.797057 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.800690 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.817794 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.960205 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-config-data\") pod \"nova-scheduler-0\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.960572 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:52 crc kubenswrapper[4691]: I1124 08:15:52.960928 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pc9db\" (UniqueName: \"kubernetes.io/projected/730d583b-8557-4759-b3f6-f53fe9e3b73f-kube-api-access-pc9db\") pod \"nova-scheduler-0\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.043704 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.062780 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-config-data\") pod \"nova-scheduler-0\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.062896 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.062976 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc9db\" (UniqueName: \"kubernetes.io/projected/730d583b-8557-4759-b3f6-f53fe9e3b73f-kube-api-access-pc9db\") pod \"nova-scheduler-0\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.067178 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-config-data\") pod \"nova-scheduler-0\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " 
pod="openstack/nova-scheduler-0" Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.067821 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.085350 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc9db\" (UniqueName: \"kubernetes.io/projected/730d583b-8557-4759-b3f6-f53fe9e3b73f-kube-api-access-pc9db\") pod \"nova-scheduler-0\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " pod="openstack/nova-scheduler-0" Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.124607 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.613075 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.713899 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"eadd696b-88c5-4e0c-aa45-3902eca76895","Type":"ContainerStarted","Data":"319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c"} Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.713982 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"eadd696b-88c5-4e0c-aa45-3902eca76895","Type":"ContainerStarted","Data":"06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d"} Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.724772 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"730d583b-8557-4759-b3f6-f53fe9e3b73f","Type":"ContainerStarted","Data":"91f06601d94bf00f45d094cd8ed40666da131e01de217a588f0e2549a3888153"} Nov 24 08:15:53 crc kubenswrapper[4691]: I1124 08:15:53.752322 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.752298084 podStartE2EDuration="2.752298084s" podCreationTimestamp="2025-11-24 08:15:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:15:53.739314038 +0000 UTC m=+1115.738263287" watchObservedRunningTime="2025-11-24 08:15:53.752298084 +0000 UTC m=+1115.751247333" Nov 24 08:15:54 crc kubenswrapper[4691]: I1124 08:15:54.384658 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 08:15:54 crc kubenswrapper[4691]: I1124 08:15:54.384992 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 08:15:54 crc kubenswrapper[4691]: I1124 08:15:54.753989 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"730d583b-8557-4759-b3f6-f53fe9e3b73f","Type":"ContainerStarted","Data":"7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e"} Nov 24 08:15:54 crc kubenswrapper[4691]: I1124 08:15:54.774005 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d3a4115-27a7-4d2a-a968-a04b5e99dd80" path="/var/lib/kubelet/pods/5d3a4115-27a7-4d2a-a968-a04b5e99dd80/volumes" Nov 24 08:15:54 crc kubenswrapper[4691]: I1124 08:15:54.774834 4691 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.774821777 podStartE2EDuration="2.774821777s" podCreationTimestamp="2025-11-24 08:15:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:15:54.77180138 +0000 UTC m=+1116.770750629" watchObservedRunningTime="2025-11-24 08:15:54.774821777 +0000 UTC m=+1116.773771026" Nov 24 08:15:56 crc kubenswrapper[4691]: I1124 08:15:56.404384 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 08:15:56 crc kubenswrapper[4691]: I1124 08:15:56.406643 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="8c2f2c51-cf66-4a86-917b-52d20691e85b" containerName="kube-state-metrics" containerID="cri-o://572a4e99a55305c32a880ded67e24134685c77bf7f3fdd944a003e601c95da3f" gracePeriod=30 Nov 24 08:15:56 crc kubenswrapper[4691]: I1124 08:15:56.791440 4691 generic.go:334] "Generic (PLEG): container finished" podID="8c2f2c51-cf66-4a86-917b-52d20691e85b" containerID="572a4e99a55305c32a880ded67e24134685c77bf7f3fdd944a003e601c95da3f" exitCode=2 Nov 24 08:15:56 crc kubenswrapper[4691]: I1124 08:15:56.791671 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"8c2f2c51-cf66-4a86-917b-52d20691e85b","Type":"ContainerDied","Data":"572a4e99a55305c32a880ded67e24134685c77bf7f3fdd944a003e601c95da3f"} Nov 24 08:15:56 crc kubenswrapper[4691]: I1124 08:15:56.936810 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.055293 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8sw2\" (UniqueName: \"kubernetes.io/projected/8c2f2c51-cf66-4a86-917b-52d20691e85b-kube-api-access-w8sw2\") pod \"8c2f2c51-cf66-4a86-917b-52d20691e85b\" (UID: \"8c2f2c51-cf66-4a86-917b-52d20691e85b\") " Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.080298 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c2f2c51-cf66-4a86-917b-52d20691e85b-kube-api-access-w8sw2" (OuterVolumeSpecName: "kube-api-access-w8sw2") pod "8c2f2c51-cf66-4a86-917b-52d20691e85b" (UID: "8c2f2c51-cf66-4a86-917b-52d20691e85b"). InnerVolumeSpecName "kube-api-access-w8sw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.157496 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8sw2\" (UniqueName: \"kubernetes.io/projected/8c2f2c51-cf66-4a86-917b-52d20691e85b-kube-api-access-w8sw2\") on node \"crc\" DevicePath \"\"" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.802985 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"8c2f2c51-cf66-4a86-917b-52d20691e85b","Type":"ContainerDied","Data":"b030a77a4051ef6ca1f6bcdb96ef5d6b7db13a61a886cac12e2809a181caacea"} Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.803059 4691 scope.go:117] "RemoveContainer" containerID="572a4e99a55305c32a880ded67e24134685c77bf7f3fdd944a003e601c95da3f" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.803206 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.866288 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.873973 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.904393 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 08:15:57 crc kubenswrapper[4691]: E1124 08:15:57.905062 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c2f2c51-cf66-4a86-917b-52d20691e85b" containerName="kube-state-metrics" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.905083 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c2f2c51-cf66-4a86-917b-52d20691e85b" containerName="kube-state-metrics" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.905343 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c2f2c51-cf66-4a86-917b-52d20691e85b" containerName="kube-state-metrics" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.906165 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.908374 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.908549 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.926292 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.975533 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9d9b7a95-3c3a-4254-b63e-214d34969aab-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " pod="openstack/kube-state-metrics-0" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.976103 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4lx4\" (UniqueName: \"kubernetes.io/projected/9d9b7a95-3c3a-4254-b63e-214d34969aab-kube-api-access-v4lx4\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " pod="openstack/kube-state-metrics-0" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.976214 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d9b7a95-3c3a-4254-b63e-214d34969aab-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " pod="openstack/kube-state-metrics-0" Nov 24 08:15:57 crc kubenswrapper[4691]: I1124 08:15:57.976373 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d9b7a95-3c3a-4254-b63e-214d34969aab-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " pod="openstack/kube-state-metrics-0" Nov 24 08:15:58 crc kubenswrapper[4691]: E1124 08:15:58.020627 4691 cadvisor_stats_provider.go:516] 
"Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c2f2c51_cf66_4a86_917b_52d20691e85b.slice/crio-b030a77a4051ef6ca1f6bcdb96ef5d6b7db13a61a886cac12e2809a181caacea\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c2f2c51_cf66_4a86_917b_52d20691e85b.slice\": RecentStats: unable to find data in memory cache]" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.078510 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9d9b7a95-3c3a-4254-b63e-214d34969aab-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " pod="openstack/kube-state-metrics-0" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.078591 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4lx4\" (UniqueName: \"kubernetes.io/projected/9d9b7a95-3c3a-4254-b63e-214d34969aab-kube-api-access-v4lx4\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " pod="openstack/kube-state-metrics-0" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.078663 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d9b7a95-3c3a-4254-b63e-214d34969aab-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " pod="openstack/kube-state-metrics-0" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.078691 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d9b7a95-3c3a-4254-b63e-214d34969aab-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " pod="openstack/kube-state-metrics-0" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.085516 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d9b7a95-3c3a-4254-b63e-214d34969aab-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " pod="openstack/kube-state-metrics-0" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.085685 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9d9b7a95-3c3a-4254-b63e-214d34969aab-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " pod="openstack/kube-state-metrics-0" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.086485 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d9b7a95-3c3a-4254-b63e-214d34969aab-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " pod="openstack/kube-state-metrics-0" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.095780 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4lx4\" (UniqueName: \"kubernetes.io/projected/9d9b7a95-3c3a-4254-b63e-214d34969aab-kube-api-access-v4lx4\") pod \"kube-state-metrics-0\" (UID: \"9d9b7a95-3c3a-4254-b63e-214d34969aab\") " 
pod="openstack/kube-state-metrics-0" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.125661 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.234257 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.521671 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.526662 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="ceilometer-central-agent" containerID="cri-o://d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2" gracePeriod=30 Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.526880 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="proxy-httpd" containerID="cri-o://d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4" gracePeriod=30 Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.526962 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="sg-core" containerID="cri-o://2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57" gracePeriod=30 Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.527002 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="ceilometer-notification-agent" containerID="cri-o://ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb" gracePeriod=30 Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.717771 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 24 08:15:58 crc kubenswrapper[4691]: W1124 08:15:58.719518 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d9b7a95_3c3a_4254_b63e_214d34969aab.slice/crio-699f4dd19902c84b8966ecdb3502502bab038b862079167353cd9d7a5f6700d2 WatchSource:0}: Error finding container 699f4dd19902c84b8966ecdb3502502bab038b862079167353cd9d7a5f6700d2: Status 404 returned error can't find the container with id 699f4dd19902c84b8966ecdb3502502bab038b862079167353cd9d7a5f6700d2 Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.774578 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c2f2c51-cf66-4a86-917b-52d20691e85b" path="/var/lib/kubelet/pods/8c2f2c51-cf66-4a86-917b-52d20691e85b/volumes" Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.858838 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d9b7a95-3c3a-4254-b63e-214d34969aab","Type":"ContainerStarted","Data":"699f4dd19902c84b8966ecdb3502502bab038b862079167353cd9d7a5f6700d2"} Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.863030 4691 generic.go:334] "Generic (PLEG): container finished" podID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerID="d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4" exitCode=0 Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.863071 4691 generic.go:334] "Generic (PLEG): container 
finished" podID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerID="2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57" exitCode=2 Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.863089 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30afd527-ea6d-41a6-8f4d-8d60a2933f01","Type":"ContainerDied","Data":"d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4"} Nov 24 08:15:58 crc kubenswrapper[4691]: I1124 08:15:58.863111 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30afd527-ea6d-41a6-8f4d-8d60a2933f01","Type":"ContainerDied","Data":"2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57"} Nov 24 08:15:59 crc kubenswrapper[4691]: I1124 08:15:59.383940 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 24 08:15:59 crc kubenswrapper[4691]: I1124 08:15:59.384637 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 24 08:15:59 crc kubenswrapper[4691]: I1124 08:15:59.881688 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d9b7a95-3c3a-4254-b63e-214d34969aab","Type":"ContainerStarted","Data":"92de8b06e1aebbd6f4978c101d236f03a5dd3c9cdc08f7ba5bcc81c8de81cccd"} Nov 24 08:15:59 crc kubenswrapper[4691]: I1124 08:15:59.882158 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 24 08:15:59 crc kubenswrapper[4691]: I1124 08:15:59.886659 4691 generic.go:334] "Generic (PLEG): container finished" podID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerID="d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2" exitCode=0 Nov 24 08:15:59 crc kubenswrapper[4691]: I1124 08:15:59.886762 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30afd527-ea6d-41a6-8f4d-8d60a2933f01","Type":"ContainerDied","Data":"d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2"} Nov 24 08:15:59 crc kubenswrapper[4691]: I1124 08:15:59.904734 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.055199435 podStartE2EDuration="2.904712338s" podCreationTimestamp="2025-11-24 08:15:57 +0000 UTC" firstStartedPulling="2025-11-24 08:15:58.721697566 +0000 UTC m=+1120.720646815" lastFinishedPulling="2025-11-24 08:15:59.571210429 +0000 UTC m=+1121.570159718" observedRunningTime="2025-11-24 08:15:59.903287707 +0000 UTC m=+1121.902236956" watchObservedRunningTime="2025-11-24 08:15:59.904712338 +0000 UTC m=+1121.903661587" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.399004 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.399393 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.508248 4691 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.688374 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-run-httpd\") pod \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.688499 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-scripts\") pod \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.688557 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-sg-core-conf-yaml\") pod \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.688670 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2v5b\" (UniqueName: \"kubernetes.io/projected/30afd527-ea6d-41a6-8f4d-8d60a2933f01-kube-api-access-m2v5b\") pod \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.688727 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-combined-ca-bundle\") pod \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.688921 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-config-data\") pod \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.688960 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-log-httpd\") pod \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\" (UID: \"30afd527-ea6d-41a6-8f4d-8d60a2933f01\") " Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.689516 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "30afd527-ea6d-41a6-8f4d-8d60a2933f01" (UID: "30afd527-ea6d-41a6-8f4d-8d60a2933f01"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.689540 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "30afd527-ea6d-41a6-8f4d-8d60a2933f01" (UID: "30afd527-ea6d-41a6-8f4d-8d60a2933f01"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.690848 4691 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.690882 4691 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30afd527-ea6d-41a6-8f4d-8d60a2933f01-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.694790 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-scripts" (OuterVolumeSpecName: "scripts") pod "30afd527-ea6d-41a6-8f4d-8d60a2933f01" (UID: "30afd527-ea6d-41a6-8f4d-8d60a2933f01"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.695711 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30afd527-ea6d-41a6-8f4d-8d60a2933f01-kube-api-access-m2v5b" (OuterVolumeSpecName: "kube-api-access-m2v5b") pod "30afd527-ea6d-41a6-8f4d-8d60a2933f01" (UID: "30afd527-ea6d-41a6-8f4d-8d60a2933f01"). InnerVolumeSpecName "kube-api-access-m2v5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.735611 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "30afd527-ea6d-41a6-8f4d-8d60a2933f01" (UID: "30afd527-ea6d-41a6-8f4d-8d60a2933f01"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.777245 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "30afd527-ea6d-41a6-8f4d-8d60a2933f01" (UID: "30afd527-ea6d-41a6-8f4d-8d60a2933f01"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.792500 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.792538 4691 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.792552 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2v5b\" (UniqueName: \"kubernetes.io/projected/30afd527-ea6d-41a6-8f4d-8d60a2933f01-kube-api-access-m2v5b\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.792564 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.802796 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-config-data" (OuterVolumeSpecName: "config-data") pod "30afd527-ea6d-41a6-8f4d-8d60a2933f01" (UID: "30afd527-ea6d-41a6-8f4d-8d60a2933f01"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.894607 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30afd527-ea6d-41a6-8f4d-8d60a2933f01-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.902707 4691 generic.go:334] "Generic (PLEG): container finished" podID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerID="ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb" exitCode=0 Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.902746 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30afd527-ea6d-41a6-8f4d-8d60a2933f01","Type":"ContainerDied","Data":"ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb"} Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.902827 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30afd527-ea6d-41a6-8f4d-8d60a2933f01","Type":"ContainerDied","Data":"f7143dc52f074870823fdf41e955ea2962ec69e0ee1e089df5ca8a197ff2e1bc"} Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.902858 4691 scope.go:117] "RemoveContainer" containerID="d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.903257 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.936972 4691 scope.go:117] "RemoveContainer" containerID="2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.952973 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.966907 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.976893 4691 scope.go:117] "RemoveContainer" containerID="ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.981414 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:00 crc kubenswrapper[4691]: E1124 08:16:00.982353 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="proxy-httpd" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.982437 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="proxy-httpd" Nov 24 08:16:00 crc kubenswrapper[4691]: E1124 08:16:00.982524 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="sg-core" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.982577 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="sg-core" Nov 24 08:16:00 crc kubenswrapper[4691]: E1124 08:16:00.982657 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="ceilometer-notification-agent" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.982762 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="ceilometer-notification-agent" Nov 24 08:16:00 crc kubenswrapper[4691]: E1124 08:16:00.982838 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="ceilometer-central-agent" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.982899 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="ceilometer-central-agent" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.983157 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="ceilometer-notification-agent" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.983227 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="ceilometer-central-agent" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.983296 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="proxy-httpd" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.983368 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" containerName="sg-core" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.987740 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.991538 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 08:16:00 crc kubenswrapper[4691]: I1124 08:16:00.992160 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:00.999055 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.001478 4691 scope.go:117] "RemoveContainer" containerID="d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.013913 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.057694 4691 scope.go:117] "RemoveContainer" containerID="d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4" Nov 24 08:16:01 crc kubenswrapper[4691]: E1124 08:16:01.062151 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4\": container with ID starting with d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4 not found: ID does not exist" containerID="d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.062213 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4"} err="failed to get container status \"d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4\": rpc error: code = NotFound desc = could not find container \"d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4\": container with ID starting with d98f0e72d5857f865a3c4d812c97005a26803913cc6b97a323713bc39a5352f4 not found: ID does not exist" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.062253 4691 scope.go:117] "RemoveContainer" containerID="2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57" Nov 24 08:16:01 crc kubenswrapper[4691]: E1124 08:16:01.068206 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57\": container with ID starting with 2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57 not found: ID does not exist" containerID="2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.068266 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57"} err="failed to get container status \"2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57\": rpc error: code = NotFound desc = could not find container \"2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57\": container with ID starting with 2354e2d01d0f268aa802c89d56dd6232d3a4567a07c94295a3db25887a830c57 not found: ID does not exist" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.068312 4691 scope.go:117] "RemoveContainer" containerID="ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb" Nov 24 08:16:01 
crc kubenswrapper[4691]: E1124 08:16:01.070528 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb\": container with ID starting with ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb not found: ID does not exist" containerID="ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.070561 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb"} err="failed to get container status \"ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb\": rpc error: code = NotFound desc = could not find container \"ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb\": container with ID starting with ed5a473f3c6bdb895459088c20032bf72e7c027e6be19c0f10e9bcdf4d05f5fb not found: ID does not exist" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.070576 4691 scope.go:117] "RemoveContainer" containerID="d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2" Nov 24 08:16:01 crc kubenswrapper[4691]: E1124 08:16:01.077388 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2\": container with ID starting with d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2 not found: ID does not exist" containerID="d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.077476 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2"} err="failed to get container status \"d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2\": rpc error: code = NotFound desc = could not find container \"d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2\": container with ID starting with d8d801b8ad96540a03943c81a4d497b11f3ac0d1d5cc8da05a8712f527cf69b2 not found: ID does not exist" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.099152 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-scripts\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.099233 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-log-httpd\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.099299 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.099330 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-run-httpd\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.099373 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.099440 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt469\" (UniqueName: \"kubernetes.io/projected/f555d827-ecba-4ed4-b28c-4d1eeeb97482-kube-api-access-mt469\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.099474 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.099498 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-config-data\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.201519 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-scripts\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.201589 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-log-httpd\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.201655 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.201701 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-run-httpd\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.201758 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.201847 4691 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt469\" (UniqueName: \"kubernetes.io/projected/f555d827-ecba-4ed4-b28c-4d1eeeb97482-kube-api-access-mt469\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.201872 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.201906 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-config-data\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.202399 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-log-httpd\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.203047 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-run-httpd\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.207023 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.208422 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-scripts\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.208636 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.213051 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.219476 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-config-data\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.229332 4691 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"kube-api-access-mt469\" (UniqueName: \"kubernetes.io/projected/f555d827-ecba-4ed4-b28c-4d1eeeb97482-kube-api-access-mt469\") pod \"ceilometer-0\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.319862 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:16:01 crc kubenswrapper[4691]: I1124 08:16:01.974527 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:02 crc kubenswrapper[4691]: I1124 08:16:02.108351 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 08:16:02 crc kubenswrapper[4691]: I1124 08:16:02.108412 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 08:16:02 crc kubenswrapper[4691]: I1124 08:16:02.773378 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30afd527-ea6d-41a6-8f4d-8d60a2933f01" path="/var/lib/kubelet/pods/30afd527-ea6d-41a6-8f4d-8d60a2933f01/volumes" Nov 24 08:16:02 crc kubenswrapper[4691]: I1124 08:16:02.929151 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f555d827-ecba-4ed4-b28c-4d1eeeb97482","Type":"ContainerStarted","Data":"7f98c191f1c8bd2d19f43e0427ab3f2715f7cc4a35235287771c9c5dba6d9e06"} Nov 24 08:16:02 crc kubenswrapper[4691]: I1124 08:16:02.929571 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f555d827-ecba-4ed4-b28c-4d1eeeb97482","Type":"ContainerStarted","Data":"60ab8c7cfd4cbc926067667f3c749a1e8bbd4803bcb412c778d0490a77100427"} Nov 24 08:16:03 crc kubenswrapper[4691]: I1124 08:16:03.125876 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 24 08:16:03 crc kubenswrapper[4691]: I1124 08:16:03.156346 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 24 08:16:03 crc kubenswrapper[4691]: I1124 08:16:03.190725 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 08:16:03 crc kubenswrapper[4691]: I1124 08:16:03.190765 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 08:16:03 crc kubenswrapper[4691]: I1124 08:16:03.942061 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f555d827-ecba-4ed4-b28c-4d1eeeb97482","Type":"ContainerStarted","Data":"fa2ffa1aa553ffb42ec52a2cfdc32450befedce3bf4d7419eeafe8d8f4ffe52f"} Nov 24 08:16:03 crc kubenswrapper[4691]: I1124 08:16:03.971612 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 24 08:16:04 crc kubenswrapper[4691]: I1124 08:16:04.954796 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"f555d827-ecba-4ed4-b28c-4d1eeeb97482","Type":"ContainerStarted","Data":"35abcb6c8177d7814cb7a1f01cdad48a238957975114fdd5c730be41ba374b34"} Nov 24 08:16:07 crc kubenswrapper[4691]: I1124 08:16:07.011163 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f555d827-ecba-4ed4-b28c-4d1eeeb97482","Type":"ContainerStarted","Data":"a190b9c0245737ccc94f6900a54e7d3b5ded39d56f1a8e1e17c2c75644c04016"} Nov 24 08:16:07 crc kubenswrapper[4691]: I1124 08:16:07.012565 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 08:16:07 crc kubenswrapper[4691]: I1124 08:16:07.036302 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.419717086 podStartE2EDuration="7.03626575s" podCreationTimestamp="2025-11-24 08:16:00 +0000 UTC" firstStartedPulling="2025-11-24 08:16:01.977251602 +0000 UTC m=+1123.976200851" lastFinishedPulling="2025-11-24 08:16:06.593800266 +0000 UTC m=+1128.592749515" observedRunningTime="2025-11-24 08:16:07.035931861 +0000 UTC m=+1129.034881110" watchObservedRunningTime="2025-11-24 08:16:07.03626575 +0000 UTC m=+1129.035215009" Nov 24 08:16:08 crc kubenswrapper[4691]: I1124 08:16:08.255137 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 24 08:16:09 crc kubenswrapper[4691]: I1124 08:16:09.390610 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 08:16:09 crc kubenswrapper[4691]: I1124 08:16:09.391497 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 08:16:09 crc kubenswrapper[4691]: I1124 08:16:09.399643 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 08:16:09 crc kubenswrapper[4691]: I1124 08:16:09.404994 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 08:16:10 crc kubenswrapper[4691]: I1124 08:16:10.913005 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.036119 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-combined-ca-bundle\") pod \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.036527 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85ngw\" (UniqueName: \"kubernetes.io/projected/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-kube-api-access-85ngw\") pod \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.037278 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-config-data\") pod \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\" (UID: \"25cbad01-f9b2-4243-b6f9-7ba26a2454d4\") " Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.045651 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-kube-api-access-85ngw" (OuterVolumeSpecName: "kube-api-access-85ngw") pod "25cbad01-f9b2-4243-b6f9-7ba26a2454d4" (UID: "25cbad01-f9b2-4243-b6f9-7ba26a2454d4"). InnerVolumeSpecName "kube-api-access-85ngw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.076022 4691 generic.go:334] "Generic (PLEG): container finished" podID="25cbad01-f9b2-4243-b6f9-7ba26a2454d4" containerID="2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0" exitCode=137 Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.076353 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"25cbad01-f9b2-4243-b6f9-7ba26a2454d4","Type":"ContainerDied","Data":"2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0"} Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.076401 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"25cbad01-f9b2-4243-b6f9-7ba26a2454d4","Type":"ContainerDied","Data":"d072fabc73a5a92bd6212e29bf268d45f9c7c3d7dab4ebd20e5aca431f7ef2f3"} Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.076422 4691 scope.go:117] "RemoveContainer" containerID="2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.079888 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.094720 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "25cbad01-f9b2-4243-b6f9-7ba26a2454d4" (UID: "25cbad01-f9b2-4243-b6f9-7ba26a2454d4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.095935 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-config-data" (OuterVolumeSpecName: "config-data") pod "25cbad01-f9b2-4243-b6f9-7ba26a2454d4" (UID: "25cbad01-f9b2-4243-b6f9-7ba26a2454d4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.140497 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.140547 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85ngw\" (UniqueName: \"kubernetes.io/projected/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-kube-api-access-85ngw\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.140567 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cbad01-f9b2-4243-b6f9-7ba26a2454d4-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.186345 4691 scope.go:117] "RemoveContainer" containerID="2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0" Nov 24 08:16:11 crc kubenswrapper[4691]: E1124 08:16:11.187039 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0\": container with ID starting with 2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0 not found: ID does not exist" containerID="2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.187108 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0"} err="failed to get container status \"2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0\": rpc error: code = NotFound desc = could not find container \"2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0\": container with ID starting with 2d499b3f75b3b87e72dbb3cd8794f749db484f33de9ca1db2a49b650b3ef8aa0 not found: ID does not exist" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.420267 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.432704 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.450717 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 08:16:11 crc kubenswrapper[4691]: E1124 08:16:11.451256 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25cbad01-f9b2-4243-b6f9-7ba26a2454d4" containerName="nova-cell1-novncproxy-novncproxy" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.451285 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="25cbad01-f9b2-4243-b6f9-7ba26a2454d4" containerName="nova-cell1-novncproxy-novncproxy" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.451655 4691 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="25cbad01-f9b2-4243-b6f9-7ba26a2454d4" containerName="nova-cell1-novncproxy-novncproxy" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.452612 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.455642 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.455970 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.456244 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.467599 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.550839 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mdxk\" (UniqueName: \"kubernetes.io/projected/d624020f-236a-4048-acb6-a7db917757f6-kube-api-access-2mdxk\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.551416 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.551606 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.551791 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.552033 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.653895 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.653999 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.654058 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mdxk\" (UniqueName: \"kubernetes.io/projected/d624020f-236a-4048-acb6-a7db917757f6-kube-api-access-2mdxk\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.654111 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.654131 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.661176 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.661210 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.661525 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.664624 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d624020f-236a-4048-acb6-a7db917757f6-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.679185 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mdxk\" (UniqueName: \"kubernetes.io/projected/d624020f-236a-4048-acb6-a7db917757f6-kube-api-access-2mdxk\") pod \"nova-cell1-novncproxy-0\" (UID: \"d624020f-236a-4048-acb6-a7db917757f6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:11 crc kubenswrapper[4691]: I1124 08:16:11.776363 4691 util.go:30] "No sandbox for pod can be found. 
Nov 24 08:16:12 crc kubenswrapper[4691]: I1124 08:16:12.112146 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 24 08:16:12 crc kubenswrapper[4691]: I1124 08:16:12.113517 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 24 08:16:12 crc kubenswrapper[4691]: I1124 08:16:12.113894 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 24 08:16:12 crc kubenswrapper[4691]: I1124 08:16:12.118243 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 24 08:16:12 crc kubenswrapper[4691]: I1124 08:16:12.289414 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 24 08:16:12 crc kubenswrapper[4691]: W1124 08:16:12.300632 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd624020f_236a_4048_acb6_a7db917757f6.slice/crio-a1848f80687aebef7cb40f23757f4115b3c7546201a6237198cde44ff1a83279 WatchSource:0}: Error finding container a1848f80687aebef7cb40f23757f4115b3c7546201a6237198cde44ff1a83279: Status 404 returned error can't find the container with id a1848f80687aebef7cb40f23757f4115b3c7546201a6237198cde44ff1a83279
Nov 24 08:16:12 crc kubenswrapper[4691]: I1124 08:16:12.798151 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25cbad01-f9b2-4243-b6f9-7ba26a2454d4" path="/var/lib/kubelet/pods/25cbad01-f9b2-4243-b6f9-7ba26a2454d4/volumes"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.106682 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d624020f-236a-4048-acb6-a7db917757f6","Type":"ContainerStarted","Data":"4a23fe4f7063a84e6208c08187cfd24221ac90e01957f1167c5be2b5d1eb73ea"}
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.106797 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d624020f-236a-4048-acb6-a7db917757f6","Type":"ContainerStarted","Data":"a1848f80687aebef7cb40f23757f4115b3c7546201a6237198cde44ff1a83279"}
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.107100 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.112345 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.139784 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.139762528 podStartE2EDuration="2.139762528s" podCreationTimestamp="2025-11-24 08:16:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:16:13.129902262 +0000 UTC m=+1135.128851551" watchObservedRunningTime="2025-11-24 08:16:13.139762528 +0000 UTC m=+1135.138711777"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.337932 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"]
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.339569 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.387812 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"]
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.402628 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.402716 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-config\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.402767 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.402814 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.402865 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.402917 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pq8d8\" (UniqueName: \"kubernetes.io/projected/dd8fb0be-4983-4cd1-b412-0c170edb6565-kube-api-access-pq8d8\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.507537 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.507619 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.507672 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.507702 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pq8d8\" (UniqueName: \"kubernetes.io/projected/dd8fb0be-4983-4cd1-b412-0c170edb6565-kube-api-access-pq8d8\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.507765 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.507815 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-config\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.509162 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-config\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.509939 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.510570 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.511172 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.511392 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.530469 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pq8d8\" (UniqueName: \"kubernetes.io/projected/dd8fb0be-4983-4cd1-b412-0c170edb6565-kube-api-access-pq8d8\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
\"kubernetes.io/projected/dd8fb0be-4983-4cd1-b412-0c170edb6565-kube-api-access-pq8d8\") pod \"dnsmasq-dns-cd5cbd7b9-pqq4l\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l" Nov 24 08:16:13 crc kubenswrapper[4691]: I1124 08:16:13.676238 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l" Nov 24 08:16:14 crc kubenswrapper[4691]: I1124 08:16:14.218163 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"] Nov 24 08:16:15 crc kubenswrapper[4691]: I1124 08:16:15.143046 4691 generic.go:334] "Generic (PLEG): container finished" podID="dd8fb0be-4983-4cd1-b412-0c170edb6565" containerID="b3e97fa35c97c05c5a6724781a977c2e2c401a39bf1b847fc1c9748377c2f106" exitCode=0 Nov 24 08:16:15 crc kubenswrapper[4691]: I1124 08:16:15.143121 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l" event={"ID":"dd8fb0be-4983-4cd1-b412-0c170edb6565","Type":"ContainerDied","Data":"b3e97fa35c97c05c5a6724781a977c2e2c401a39bf1b847fc1c9748377c2f106"} Nov 24 08:16:15 crc kubenswrapper[4691]: I1124 08:16:15.143410 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l" event={"ID":"dd8fb0be-4983-4cd1-b412-0c170edb6565","Type":"ContainerStarted","Data":"e6bc689f302bb71988ec06b0453a5540aba74104613b448dd8a95a7a1115c4e7"} Nov 24 08:16:15 crc kubenswrapper[4691]: I1124 08:16:15.917817 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:15 crc kubenswrapper[4691]: I1124 08:16:15.918362 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="sg-core" containerID="cri-o://35abcb6c8177d7814cb7a1f01cdad48a238957975114fdd5c730be41ba374b34" gracePeriod=30 Nov 24 08:16:15 crc kubenswrapper[4691]: I1124 08:16:15.918426 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="ceilometer-notification-agent" containerID="cri-o://fa2ffa1aa553ffb42ec52a2cfdc32450befedce3bf4d7419eeafe8d8f4ffe52f" gracePeriod=30 Nov 24 08:16:15 crc kubenswrapper[4691]: I1124 08:16:15.918404 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="proxy-httpd" containerID="cri-o://a190b9c0245737ccc94f6900a54e7d3b5ded39d56f1a8e1e17c2c75644c04016" gracePeriod=30 Nov 24 08:16:15 crc kubenswrapper[4691]: I1124 08:16:15.919025 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="ceilometer-central-agent" containerID="cri-o://7f98c191f1c8bd2d19f43e0427ab3f2715f7cc4a35235287771c9c5dba6d9e06" gracePeriod=30 Nov 24 08:16:16 crc kubenswrapper[4691]: I1124 08:16:16.103387 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:16:16 crc kubenswrapper[4691]: I1124 08:16:16.157286 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l" event={"ID":"dd8fb0be-4983-4cd1-b412-0c170edb6565","Type":"ContainerStarted","Data":"a7d9fa456c5dcccd50a8c3e8c704c957e6c0ee57868d18e70db5f64d25439421"} Nov 24 08:16:16 crc kubenswrapper[4691]: I1124 08:16:16.157393 4691 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l" Nov 24 08:16:16 crc kubenswrapper[4691]: I1124 08:16:16.163200 4691 generic.go:334] "Generic (PLEG): container finished" podID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerID="a190b9c0245737ccc94f6900a54e7d3b5ded39d56f1a8e1e17c2c75644c04016" exitCode=0 Nov 24 08:16:16 crc kubenswrapper[4691]: I1124 08:16:16.163242 4691 generic.go:334] "Generic (PLEG): container finished" podID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerID="35abcb6c8177d7814cb7a1f01cdad48a238957975114fdd5c730be41ba374b34" exitCode=2 Nov 24 08:16:16 crc kubenswrapper[4691]: I1124 08:16:16.163297 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f555d827-ecba-4ed4-b28c-4d1eeeb97482","Type":"ContainerDied","Data":"a190b9c0245737ccc94f6900a54e7d3b5ded39d56f1a8e1e17c2c75644c04016"} Nov 24 08:16:16 crc kubenswrapper[4691]: I1124 08:16:16.163380 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f555d827-ecba-4ed4-b28c-4d1eeeb97482","Type":"ContainerDied","Data":"35abcb6c8177d7814cb7a1f01cdad48a238957975114fdd5c730be41ba374b34"} Nov 24 08:16:16 crc kubenswrapper[4691]: I1124 08:16:16.163458 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerName="nova-api-log" containerID="cri-o://06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d" gracePeriod=30 Nov 24 08:16:16 crc kubenswrapper[4691]: I1124 08:16:16.164620 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerName="nova-api-api" containerID="cri-o://319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c" gracePeriod=30 Nov 24 08:16:16 crc kubenswrapper[4691]: I1124 08:16:16.777296 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.183287 4691 generic.go:334] "Generic (PLEG): container finished" podID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerID="fa2ffa1aa553ffb42ec52a2cfdc32450befedce3bf4d7419eeafe8d8f4ffe52f" exitCode=0 Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.183787 4691 generic.go:334] "Generic (PLEG): container finished" podID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerID="7f98c191f1c8bd2d19f43e0427ab3f2715f7cc4a35235287771c9c5dba6d9e06" exitCode=0 Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.183847 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f555d827-ecba-4ed4-b28c-4d1eeeb97482","Type":"ContainerDied","Data":"fa2ffa1aa553ffb42ec52a2cfdc32450befedce3bf4d7419eeafe8d8f4ffe52f"} Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.183889 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f555d827-ecba-4ed4-b28c-4d1eeeb97482","Type":"ContainerDied","Data":"7f98c191f1c8bd2d19f43e0427ab3f2715f7cc4a35235287771c9c5dba6d9e06"} Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.185742 4691 generic.go:334] "Generic (PLEG): container finished" podID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerID="06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d" exitCode=143 Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.186961 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"eadd696b-88c5-4e0c-aa45-3902eca76895","Type":"ContainerDied","Data":"06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d"} Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.380869 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.413774 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l" podStartSLOduration=4.41375178 podStartE2EDuration="4.41375178s" podCreationTimestamp="2025-11-24 08:16:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:16:16.192385157 +0000 UTC m=+1138.191334406" watchObservedRunningTime="2025-11-24 08:16:17.41375178 +0000 UTC m=+1139.412701029" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.503397 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-combined-ca-bundle\") pod \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.503476 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-log-httpd\") pod \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.503516 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-scripts\") pod \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.503537 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-sg-core-conf-yaml\") pod \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.503597 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-run-httpd\") pod \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.503642 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-config-data\") pod \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.503713 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mt469\" (UniqueName: \"kubernetes.io/projected/f555d827-ecba-4ed4-b28c-4d1eeeb97482-kube-api-access-mt469\") pod \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.505174 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-ceilometer-tls-certs\") pod \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\" (UID: \"f555d827-ecba-4ed4-b28c-4d1eeeb97482\") " Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.504114 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f555d827-ecba-4ed4-b28c-4d1eeeb97482" (UID: "f555d827-ecba-4ed4-b28c-4d1eeeb97482"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.504222 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f555d827-ecba-4ed4-b28c-4d1eeeb97482" (UID: "f555d827-ecba-4ed4-b28c-4d1eeeb97482"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.507176 4691 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.507313 4691 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f555d827-ecba-4ed4-b28c-4d1eeeb97482-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.525789 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f555d827-ecba-4ed4-b28c-4d1eeeb97482-kube-api-access-mt469" (OuterVolumeSpecName: "kube-api-access-mt469") pod "f555d827-ecba-4ed4-b28c-4d1eeeb97482" (UID: "f555d827-ecba-4ed4-b28c-4d1eeeb97482"). InnerVolumeSpecName "kube-api-access-mt469". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.526688 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-scripts" (OuterVolumeSpecName: "scripts") pod "f555d827-ecba-4ed4-b28c-4d1eeeb97482" (UID: "f555d827-ecba-4ed4-b28c-4d1eeeb97482"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.552161 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f555d827-ecba-4ed4-b28c-4d1eeeb97482" (UID: "f555d827-ecba-4ed4-b28c-4d1eeeb97482"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.597903 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f555d827-ecba-4ed4-b28c-4d1eeeb97482" (UID: "f555d827-ecba-4ed4-b28c-4d1eeeb97482"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.610428 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.610492 4691 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.610516 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mt469\" (UniqueName: \"kubernetes.io/projected/f555d827-ecba-4ed4-b28c-4d1eeeb97482-kube-api-access-mt469\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.610531 4691 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.625474 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f555d827-ecba-4ed4-b28c-4d1eeeb97482" (UID: "f555d827-ecba-4ed4-b28c-4d1eeeb97482"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.682616 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-config-data" (OuterVolumeSpecName: "config-data") pod "f555d827-ecba-4ed4-b28c-4d1eeeb97482" (UID: "f555d827-ecba-4ed4-b28c-4d1eeeb97482"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.711961 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:17 crc kubenswrapper[4691]: I1124 08:16:17.711995 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f555d827-ecba-4ed4-b28c-4d1eeeb97482-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.200813 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f555d827-ecba-4ed4-b28c-4d1eeeb97482","Type":"ContainerDied","Data":"60ab8c7cfd4cbc926067667f3c749a1e8bbd4803bcb412c778d0490a77100427"} Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.201976 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.202143 4691 scope.go:117] "RemoveContainer" containerID="a190b9c0245737ccc94f6900a54e7d3b5ded39d56f1a8e1e17c2c75644c04016" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.243983 4691 scope.go:117] "RemoveContainer" containerID="35abcb6c8177d7814cb7a1f01cdad48a238957975114fdd5c730be41ba374b34" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.252553 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.270054 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.276086 4691 scope.go:117] "RemoveContainer" containerID="fa2ffa1aa553ffb42ec52a2cfdc32450befedce3bf4d7419eeafe8d8f4ffe52f" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.281865 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:18 crc kubenswrapper[4691]: E1124 08:16:18.282697 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="ceilometer-notification-agent" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.282790 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="ceilometer-notification-agent" Nov 24 08:16:18 crc kubenswrapper[4691]: E1124 08:16:18.282872 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="sg-core" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.282962 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="sg-core" Nov 24 08:16:18 crc kubenswrapper[4691]: E1124 08:16:18.283052 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="proxy-httpd" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.283119 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="proxy-httpd" Nov 24 08:16:18 crc kubenswrapper[4691]: E1124 08:16:18.283211 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="ceilometer-central-agent" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.283274 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="ceilometer-central-agent" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.283546 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="ceilometer-notification-agent" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.283648 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="proxy-httpd" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.283748 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="sg-core" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.283817 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" containerName="ceilometer-central-agent" Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.287602 4691 util.go:30] "No 
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.290641 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.290960 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.299989 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.304619 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.331079 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.335560 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-scripts\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.335609 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-run-httpd\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.335649 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sklp\" (UniqueName: \"kubernetes.io/projected/d97672fd-30ed-424c-a3fc-f3df0e0f4083-kube-api-access-8sklp\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.335886 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-config-data\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.335919 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.335960 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.336026 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-log-httpd\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.375963 4691 scope.go:117] "RemoveContainer" containerID="7f98c191f1c8bd2d19f43e0427ab3f2715f7cc4a35235287771c9c5dba6d9e06"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.437807 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-config-data\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.437861 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.437899 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.437928 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-log-httpd\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.438020 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.438050 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-scripts\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.438069 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-run-httpd\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.438095 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sklp\" (UniqueName: \"kubernetes.io/projected/d97672fd-30ed-424c-a3fc-f3df0e0f4083-kube-api-access-8sklp\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.439202 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-log-httpd\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.439901 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-run-httpd\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.443977 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-scripts\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.446476 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-config-data\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.447420 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.462656 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.462830 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sklp\" (UniqueName: \"kubernetes.io/projected/d97672fd-30ed-424c-a3fc-f3df0e0f4083-kube-api-access-8sklp\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.463879 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.506641 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.507837 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 24 08:16:18 crc kubenswrapper[4691]: I1124 08:16:18.817140 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f555d827-ecba-4ed4-b28c-4d1eeeb97482" path="/var/lib/kubelet/pods/f555d827-ecba-4ed4-b28c-4d1eeeb97482/volumes"
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.026830 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.216478 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d97672fd-30ed-424c-a3fc-f3df0e0f4083","Type":"ContainerStarted","Data":"a63488da99334b6ba7d0d1155a1b25f34b2200e9ec1dfa53f78925a090888ef5"}
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.685285 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.768737 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-combined-ca-bundle\") pod \"eadd696b-88c5-4e0c-aa45-3902eca76895\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") "
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.769089 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mqpk\" (UniqueName: \"kubernetes.io/projected/eadd696b-88c5-4e0c-aa45-3902eca76895-kube-api-access-2mqpk\") pod \"eadd696b-88c5-4e0c-aa45-3902eca76895\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") "
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.769252 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eadd696b-88c5-4e0c-aa45-3902eca76895-logs\") pod \"eadd696b-88c5-4e0c-aa45-3902eca76895\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") "
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.769280 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-config-data\") pod \"eadd696b-88c5-4e0c-aa45-3902eca76895\" (UID: \"eadd696b-88c5-4e0c-aa45-3902eca76895\") "
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.773334 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eadd696b-88c5-4e0c-aa45-3902eca76895-logs" (OuterVolumeSpecName: "logs") pod "eadd696b-88c5-4e0c-aa45-3902eca76895" (UID: "eadd696b-88c5-4e0c-aa45-3902eca76895"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.784895 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eadd696b-88c5-4e0c-aa45-3902eca76895-kube-api-access-2mqpk" (OuterVolumeSpecName: "kube-api-access-2mqpk") pod "eadd696b-88c5-4e0c-aa45-3902eca76895" (UID: "eadd696b-88c5-4e0c-aa45-3902eca76895"). InnerVolumeSpecName "kube-api-access-2mqpk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.831620 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eadd696b-88c5-4e0c-aa45-3902eca76895" (UID: "eadd696b-88c5-4e0c-aa45-3902eca76895"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.841690 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-config-data" (OuterVolumeSpecName: "config-data") pod "eadd696b-88c5-4e0c-aa45-3902eca76895" (UID: "eadd696b-88c5-4e0c-aa45-3902eca76895"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.874228 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eadd696b-88c5-4e0c-aa45-3902eca76895-logs\") on node \"crc\" DevicePath \"\""
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.874277 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-config-data\") on node \"crc\" DevicePath \"\""
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.874292 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eadd696b-88c5-4e0c-aa45-3902eca76895-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 24 08:16:19 crc kubenswrapper[4691]: I1124 08:16:19.874308 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mqpk\" (UniqueName: \"kubernetes.io/projected/eadd696b-88c5-4e0c-aa45-3902eca76895-kube-api-access-2mqpk\") on node \"crc\" DevicePath \"\""
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.243598 4691 generic.go:334] "Generic (PLEG): container finished" podID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerID="319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c" exitCode=0
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.243700 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"eadd696b-88c5-4e0c-aa45-3902eca76895","Type":"ContainerDied","Data":"319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c"}
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.243751 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"eadd696b-88c5-4e0c-aa45-3902eca76895","Type":"ContainerDied","Data":"490649e57a1f0bb436e92115fd9e2e63af4316aa0d6b550c356b8b90d2fe805d"}
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.243777 4691 scope.go:117] "RemoveContainer" containerID="319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c"
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.244134 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.251668 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d97672fd-30ed-424c-a3fc-f3df0e0f4083","Type":"ContainerStarted","Data":"edd7a88c4e0f7e64512fe3a49c370291936f7b49577ab2ac28edbedc6a17ae88"}
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.315040 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.326738 4691 scope.go:117] "RemoveContainer" containerID="06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d"
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.331303 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.382608 4691 scope.go:117] "RemoveContainer" containerID="319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c"
Nov 24 08:16:20 crc kubenswrapper[4691]: E1124 08:16:20.388501 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c\": container with ID starting with 319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c not found: ID does not exist" containerID="319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c"
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.388560 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c"} err="failed to get container status \"319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c\": rpc error: code = NotFound desc = could not find container \"319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c\": container with ID starting with 319d338eabfb730de8011e065b56fdc10c38e02e1c158ff3841df300cb257b2c not found: ID does not exist"
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.388596 4691 scope.go:117] "RemoveContainer" containerID="06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d"
Nov 24 08:16:20 crc kubenswrapper[4691]: E1124 08:16:20.392546 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d\": container with ID starting with 06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d not found: ID does not exist" containerID="06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d"
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.392571 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d"} err="failed to get container status \"06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d\": rpc error: code = NotFound desc = could not find container \"06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d\": container with ID starting with 06c84d588addc7123b511a4130eac9c444c4a659b998be5ec401aff32dd71c7d not found: ID does not exist"
Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.392611 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 24 08:16:20 crc kubenswrapper[4691]: E1124 08:16:20.393195 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerName="nova-api-api"
container" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerName="nova-api-api" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.393218 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerName="nova-api-api" Nov 24 08:16:20 crc kubenswrapper[4691]: E1124 08:16:20.393233 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerName="nova-api-log" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.393239 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerName="nova-api-log" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.393521 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerName="nova-api-log" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.393537 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" containerName="nova-api-api" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.394793 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.400730 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.400801 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.400826 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.415799 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.493415 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.493516 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84240b43-dd9d-4cb6-842b-e76fb1e73081-logs\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.493686 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9w5g\" (UniqueName: \"kubernetes.io/projected/84240b43-dd9d-4cb6-842b-e76fb1e73081-kube-api-access-j9w5g\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.494882 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-internal-tls-certs\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.494985 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-public-tls-certs\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.495197 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-config-data\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.597392 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-config-data\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.597502 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.597538 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84240b43-dd9d-4cb6-842b-e76fb1e73081-logs\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.597661 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9w5g\" (UniqueName: \"kubernetes.io/projected/84240b43-dd9d-4cb6-842b-e76fb1e73081-kube-api-access-j9w5g\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.597711 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-internal-tls-certs\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.597750 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-public-tls-certs\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.598267 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84240b43-dd9d-4cb6-842b-e76fb1e73081-logs\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.603873 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.604641 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-config-data\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.606721 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-internal-tls-certs\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.606804 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-public-tls-certs\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.628924 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9w5g\" (UniqueName: \"kubernetes.io/projected/84240b43-dd9d-4cb6-842b-e76fb1e73081-kube-api-access-j9w5g\") pod \"nova-api-0\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.738372 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:16:20 crc kubenswrapper[4691]: I1124 08:16:20.776197 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eadd696b-88c5-4e0c-aa45-3902eca76895" path="/var/lib/kubelet/pods/eadd696b-88c5-4e0c-aa45-3902eca76895/volumes" Nov 24 08:16:21 crc kubenswrapper[4691]: I1124 08:16:21.089984 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:16:21 crc kubenswrapper[4691]: I1124 08:16:21.090601 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:16:21 crc kubenswrapper[4691]: I1124 08:16:21.216635 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:16:21 crc kubenswrapper[4691]: I1124 08:16:21.270307 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84240b43-dd9d-4cb6-842b-e76fb1e73081","Type":"ContainerStarted","Data":"3d7b1f76d9d53bee81e777fda670a690f5323a1911c5ab83d0901d77ae8354d7"} Nov 24 08:16:21 crc kubenswrapper[4691]: I1124 08:16:21.777002 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:21 crc kubenswrapper[4691]: I1124 08:16:21.802826 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.283828 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d97672fd-30ed-424c-a3fc-f3df0e0f4083","Type":"ContainerStarted","Data":"98cf59f60c9aeb64f6d56ab4e649b05517cd8ef25e5eda80bb6a36192131f490"} Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.288475 4691 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84240b43-dd9d-4cb6-842b-e76fb1e73081","Type":"ContainerStarted","Data":"33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae"} Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.288539 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84240b43-dd9d-4cb6-842b-e76fb1e73081","Type":"ContainerStarted","Data":"a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f"} Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.314959 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.317890 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.317867632 podStartE2EDuration="2.317867632s" podCreationTimestamp="2025-11-24 08:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:16:22.311372164 +0000 UTC m=+1144.310321433" watchObservedRunningTime="2025-11-24 08:16:22.317867632 +0000 UTC m=+1144.316816881" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.505112 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-6qk4g"] Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.507184 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.511168 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.511266 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.516477 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-6qk4g"] Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.559797 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2whj\" (UniqueName: \"kubernetes.io/projected/a34df957-8938-4332-9781-bad870fa9531-kube-api-access-x2whj\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.559847 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-scripts\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.559887 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-config-data\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.559911 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.662652 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2whj\" (UniqueName: \"kubernetes.io/projected/a34df957-8938-4332-9781-bad870fa9531-kube-api-access-x2whj\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.662734 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-scripts\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.662806 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-config-data\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.662841 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.667169 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-scripts\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.668026 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.671287 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-config-data\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.683800 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2whj\" (UniqueName: \"kubernetes.io/projected/a34df957-8938-4332-9781-bad870fa9531-kube-api-access-x2whj\") pod \"nova-cell1-cell-mapping-6qk4g\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:22 crc kubenswrapper[4691]: I1124 08:16:22.830118 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:23 crc kubenswrapper[4691]: I1124 08:16:23.302648 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d97672fd-30ed-424c-a3fc-f3df0e0f4083","Type":"ContainerStarted","Data":"7d68e93e90b64a1f520a5078c231ec36df136e428aab05d343e0295b7efebd84"} Nov 24 08:16:23 crc kubenswrapper[4691]: I1124 08:16:23.355604 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-6qk4g"] Nov 24 08:16:23 crc kubenswrapper[4691]: I1124 08:16:23.678345 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l" Nov 24 08:16:23 crc kubenswrapper[4691]: I1124 08:16:23.750555 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mwltv"] Nov 24 08:16:23 crc kubenswrapper[4691]: I1124 08:16:23.750834 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" podUID="415032f2-8952-4970-a28e-2c64f5f0206e" containerName="dnsmasq-dns" containerID="cri-o://fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b" gracePeriod=10 Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.265639 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.297136 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-config\") pod \"415032f2-8952-4970-a28e-2c64f5f0206e\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.297484 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-sb\") pod \"415032f2-8952-4970-a28e-2c64f5f0206e\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.297558 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-nb\") pod \"415032f2-8952-4970-a28e-2c64f5f0206e\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.297602 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-svc\") pod \"415032f2-8952-4970-a28e-2c64f5f0206e\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.297638 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fmj8\" (UniqueName: \"kubernetes.io/projected/415032f2-8952-4970-a28e-2c64f5f0206e-kube-api-access-8fmj8\") pod \"415032f2-8952-4970-a28e-2c64f5f0206e\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.297700 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-swift-storage-0\") pod \"415032f2-8952-4970-a28e-2c64f5f0206e\" (UID: \"415032f2-8952-4970-a28e-2c64f5f0206e\") " Nov 24 
08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.303704 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/415032f2-8952-4970-a28e-2c64f5f0206e-kube-api-access-8fmj8" (OuterVolumeSpecName: "kube-api-access-8fmj8") pod "415032f2-8952-4970-a28e-2c64f5f0206e" (UID: "415032f2-8952-4970-a28e-2c64f5f0206e"). InnerVolumeSpecName "kube-api-access-8fmj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.325588 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6qk4g" event={"ID":"a34df957-8938-4332-9781-bad870fa9531","Type":"ContainerStarted","Data":"0c3932950cd16601648944e4aa4f993b998c36673ffdc4a799a689a4b5ab1595"} Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.325650 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6qk4g" event={"ID":"a34df957-8938-4332-9781-bad870fa9531","Type":"ContainerStarted","Data":"91298c66817365cefd6ea8dad117f10670303e7ca13d126396686bbb3baaf03d"} Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.331532 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d97672fd-30ed-424c-a3fc-f3df0e0f4083","Type":"ContainerStarted","Data":"5310e276bba6b5674b4fce5cc4162f1271ecba754d4ffbf745b698aa6ef58d85"} Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.331695 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="ceilometer-central-agent" containerID="cri-o://edd7a88c4e0f7e64512fe3a49c370291936f7b49577ab2ac28edbedc6a17ae88" gracePeriod=30 Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.331769 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.331808 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="proxy-httpd" containerID="cri-o://5310e276bba6b5674b4fce5cc4162f1271ecba754d4ffbf745b698aa6ef58d85" gracePeriod=30 Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.331843 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="sg-core" containerID="cri-o://7d68e93e90b64a1f520a5078c231ec36df136e428aab05d343e0295b7efebd84" gracePeriod=30 Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.331880 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="ceilometer-notification-agent" containerID="cri-o://98cf59f60c9aeb64f6d56ab4e649b05517cd8ef25e5eda80bb6a36192131f490" gracePeriod=30 Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.352617 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-6qk4g" podStartSLOduration=2.3525921309999998 podStartE2EDuration="2.352592131s" podCreationTimestamp="2025-11-24 08:16:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:16:24.342100857 +0000 UTC m=+1146.341050106" watchObservedRunningTime="2025-11-24 08:16:24.352592131 +0000 UTC m=+1146.351541380" Nov 24 08:16:24 crc 
kubenswrapper[4691]: I1124 08:16:24.354623 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" event={"ID":"415032f2-8952-4970-a28e-2c64f5f0206e","Type":"ContainerDied","Data":"fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b"} Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.354674 4691 scope.go:117] "RemoveContainer" containerID="fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.354706 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.354625 4691 generic.go:334] "Generic (PLEG): container finished" podID="415032f2-8952-4970-a28e-2c64f5f0206e" containerID="fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b" exitCode=0 Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.354832 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-mwltv" event={"ID":"415032f2-8952-4970-a28e-2c64f5f0206e","Type":"ContainerDied","Data":"f9493f9e7b046f44230d22a0149d7504f622816eab66d90c70e7ac1279d08b95"} Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.382681 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.033569944 podStartE2EDuration="6.382658192s" podCreationTimestamp="2025-11-24 08:16:18 +0000 UTC" firstStartedPulling="2025-11-24 08:16:19.028939218 +0000 UTC m=+1141.027888477" lastFinishedPulling="2025-11-24 08:16:23.378027476 +0000 UTC m=+1145.376976725" observedRunningTime="2025-11-24 08:16:24.362121597 +0000 UTC m=+1146.361070856" watchObservedRunningTime="2025-11-24 08:16:24.382658192 +0000 UTC m=+1146.381607441" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.400554 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fmj8\" (UniqueName: \"kubernetes.io/projected/415032f2-8952-4970-a28e-2c64f5f0206e-kube-api-access-8fmj8\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.405132 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "415032f2-8952-4970-a28e-2c64f5f0206e" (UID: "415032f2-8952-4970-a28e-2c64f5f0206e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.410029 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-config" (OuterVolumeSpecName: "config") pod "415032f2-8952-4970-a28e-2c64f5f0206e" (UID: "415032f2-8952-4970-a28e-2c64f5f0206e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.415636 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "415032f2-8952-4970-a28e-2c64f5f0206e" (UID: "415032f2-8952-4970-a28e-2c64f5f0206e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.415919 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "415032f2-8952-4970-a28e-2c64f5f0206e" (UID: "415032f2-8952-4970-a28e-2c64f5f0206e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.418017 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "415032f2-8952-4970-a28e-2c64f5f0206e" (UID: "415032f2-8952-4970-a28e-2c64f5f0206e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.502273 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.502327 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.502347 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.502359 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.502371 4691 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/415032f2-8952-4970-a28e-2c64f5f0206e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.537277 4691 scope.go:117] "RemoveContainer" containerID="112ee9e3b9ab9f771155668e7201b3806c6010c2c47528913e67c5d87d64ce15" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.596922 4691 scope.go:117] "RemoveContainer" containerID="fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b" Nov 24 08:16:24 crc kubenswrapper[4691]: E1124 08:16:24.597384 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b\": container with ID starting with fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b not found: ID does not exist" containerID="fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.597412 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b"} err="failed to get container status \"fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b\": rpc error: code = NotFound desc = could not find container \"fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b\": 
container with ID starting with fe1a229e47e24656264ac3b2558fb3badfd425323c0a71efec3504f6b2abd81b not found: ID does not exist" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.597440 4691 scope.go:117] "RemoveContainer" containerID="112ee9e3b9ab9f771155668e7201b3806c6010c2c47528913e67c5d87d64ce15" Nov 24 08:16:24 crc kubenswrapper[4691]: E1124 08:16:24.598050 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"112ee9e3b9ab9f771155668e7201b3806c6010c2c47528913e67c5d87d64ce15\": container with ID starting with 112ee9e3b9ab9f771155668e7201b3806c6010c2c47528913e67c5d87d64ce15 not found: ID does not exist" containerID="112ee9e3b9ab9f771155668e7201b3806c6010c2c47528913e67c5d87d64ce15" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.598108 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"112ee9e3b9ab9f771155668e7201b3806c6010c2c47528913e67c5d87d64ce15"} err="failed to get container status \"112ee9e3b9ab9f771155668e7201b3806c6010c2c47528913e67c5d87d64ce15\": rpc error: code = NotFound desc = could not find container \"112ee9e3b9ab9f771155668e7201b3806c6010c2c47528913e67c5d87d64ce15\": container with ID starting with 112ee9e3b9ab9f771155668e7201b3806c6010c2c47528913e67c5d87d64ce15 not found: ID does not exist" Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.706717 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mwltv"] Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.725373 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-mwltv"] Nov 24 08:16:24 crc kubenswrapper[4691]: I1124 08:16:24.773678 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="415032f2-8952-4970-a28e-2c64f5f0206e" path="/var/lib/kubelet/pods/415032f2-8952-4970-a28e-2c64f5f0206e/volumes" Nov 24 08:16:25 crc kubenswrapper[4691]: I1124 08:16:25.367083 4691 generic.go:334] "Generic (PLEG): container finished" podID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerID="5310e276bba6b5674b4fce5cc4162f1271ecba754d4ffbf745b698aa6ef58d85" exitCode=0 Nov 24 08:16:25 crc kubenswrapper[4691]: I1124 08:16:25.367563 4691 generic.go:334] "Generic (PLEG): container finished" podID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerID="7d68e93e90b64a1f520a5078c231ec36df136e428aab05d343e0295b7efebd84" exitCode=2 Nov 24 08:16:25 crc kubenswrapper[4691]: I1124 08:16:25.367574 4691 generic.go:334] "Generic (PLEG): container finished" podID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerID="98cf59f60c9aeb64f6d56ab4e649b05517cd8ef25e5eda80bb6a36192131f490" exitCode=0 Nov 24 08:16:25 crc kubenswrapper[4691]: I1124 08:16:25.367160 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d97672fd-30ed-424c-a3fc-f3df0e0f4083","Type":"ContainerDied","Data":"5310e276bba6b5674b4fce5cc4162f1271ecba754d4ffbf745b698aa6ef58d85"} Nov 24 08:16:25 crc kubenswrapper[4691]: I1124 08:16:25.367654 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d97672fd-30ed-424c-a3fc-f3df0e0f4083","Type":"ContainerDied","Data":"7d68e93e90b64a1f520a5078c231ec36df136e428aab05d343e0295b7efebd84"} Nov 24 08:16:25 crc kubenswrapper[4691]: I1124 08:16:25.367674 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"d97672fd-30ed-424c-a3fc-f3df0e0f4083","Type":"ContainerDied","Data":"98cf59f60c9aeb64f6d56ab4e649b05517cd8ef25e5eda80bb6a36192131f490"} Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.398168 4691 generic.go:334] "Generic (PLEG): container finished" podID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerID="edd7a88c4e0f7e64512fe3a49c370291936f7b49577ab2ac28edbedc6a17ae88" exitCode=0 Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.398253 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d97672fd-30ed-424c-a3fc-f3df0e0f4083","Type":"ContainerDied","Data":"edd7a88c4e0f7e64512fe3a49c370291936f7b49577ab2ac28edbedc6a17ae88"} Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.398564 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d97672fd-30ed-424c-a3fc-f3df0e0f4083","Type":"ContainerDied","Data":"a63488da99334b6ba7d0d1155a1b25f34b2200e9ec1dfa53f78925a090888ef5"} Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.398582 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a63488da99334b6ba7d0d1155a1b25f34b2200e9ec1dfa53f78925a090888ef5" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.439940 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.564735 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-ceilometer-tls-certs\") pod \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.564856 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-config-data\") pod \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.564913 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-scripts\") pod \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.565040 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-combined-ca-bundle\") pod \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.565076 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8sklp\" (UniqueName: \"kubernetes.io/projected/d97672fd-30ed-424c-a3fc-f3df0e0f4083-kube-api-access-8sklp\") pod \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.565140 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-sg-core-conf-yaml\") pod \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " Nov 24 08:16:27 crc 
kubenswrapper[4691]: I1124 08:16:27.565168 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-run-httpd\") pod \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.565225 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-log-httpd\") pod \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\" (UID: \"d97672fd-30ed-424c-a3fc-f3df0e0f4083\") " Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.565875 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d97672fd-30ed-424c-a3fc-f3df0e0f4083" (UID: "d97672fd-30ed-424c-a3fc-f3df0e0f4083"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.566083 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d97672fd-30ed-424c-a3fc-f3df0e0f4083" (UID: "d97672fd-30ed-424c-a3fc-f3df0e0f4083"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.569604 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-scripts" (OuterVolumeSpecName: "scripts") pod "d97672fd-30ed-424c-a3fc-f3df0e0f4083" (UID: "d97672fd-30ed-424c-a3fc-f3df0e0f4083"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.586366 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d97672fd-30ed-424c-a3fc-f3df0e0f4083-kube-api-access-8sklp" (OuterVolumeSpecName: "kube-api-access-8sklp") pod "d97672fd-30ed-424c-a3fc-f3df0e0f4083" (UID: "d97672fd-30ed-424c-a3fc-f3df0e0f4083"). InnerVolumeSpecName "kube-api-access-8sklp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.591887 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d97672fd-30ed-424c-a3fc-f3df0e0f4083" (UID: "d97672fd-30ed-424c-a3fc-f3df0e0f4083"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.619725 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "d97672fd-30ed-424c-a3fc-f3df0e0f4083" (UID: "d97672fd-30ed-424c-a3fc-f3df0e0f4083"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.638945 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d97672fd-30ed-424c-a3fc-f3df0e0f4083" (UID: "d97672fd-30ed-424c-a3fc-f3df0e0f4083"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.662654 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-config-data" (OuterVolumeSpecName: "config-data") pod "d97672fd-30ed-424c-a3fc-f3df0e0f4083" (UID: "d97672fd-30ed-424c-a3fc-f3df0e0f4083"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.667969 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.668060 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.668124 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8sklp\" (UniqueName: \"kubernetes.io/projected/d97672fd-30ed-424c-a3fc-f3df0e0f4083-kube-api-access-8sklp\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.668199 4691 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.668255 4691 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.668323 4691 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d97672fd-30ed-424c-a3fc-f3df0e0f4083-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.668399 4691 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:27 crc kubenswrapper[4691]: I1124 08:16:27.668500 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d97672fd-30ed-424c-a3fc-f3df0e0f4083-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.407855 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.449419 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.461267 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.485312 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:28 crc kubenswrapper[4691]: E1124 08:16:28.485807 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="sg-core" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.485825 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="sg-core" Nov 24 08:16:28 crc kubenswrapper[4691]: E1124 08:16:28.485841 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="ceilometer-notification-agent" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.485848 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="ceilometer-notification-agent" Nov 24 08:16:28 crc kubenswrapper[4691]: E1124 08:16:28.485864 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="415032f2-8952-4970-a28e-2c64f5f0206e" containerName="init" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.485869 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="415032f2-8952-4970-a28e-2c64f5f0206e" containerName="init" Nov 24 08:16:28 crc kubenswrapper[4691]: E1124 08:16:28.485876 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="ceilometer-central-agent" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.485882 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="ceilometer-central-agent" Nov 24 08:16:28 crc kubenswrapper[4691]: E1124 08:16:28.485893 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="415032f2-8952-4970-a28e-2c64f5f0206e" containerName="dnsmasq-dns" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.485898 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="415032f2-8952-4970-a28e-2c64f5f0206e" containerName="dnsmasq-dns" Nov 24 08:16:28 crc kubenswrapper[4691]: E1124 08:16:28.485915 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="proxy-httpd" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.485922 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="proxy-httpd" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.486103 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="proxy-httpd" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.486116 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="sg-core" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.486130 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="ceilometer-notification-agent" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.486141 4691 
memory_manager.go:354] "RemoveStaleState removing state" podUID="415032f2-8952-4970-a28e-2c64f5f0206e" containerName="dnsmasq-dns" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.486154 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" containerName="ceilometer-central-agent" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.488087 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.496239 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.496247 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.496697 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.503240 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.589720 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e783b59-54e1-401f-a281-b665848b7083-run-httpd\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.589784 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.589823 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-scripts\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.589954 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e783b59-54e1-401f-a281-b665848b7083-log-httpd\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.590059 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.590099 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-config-data\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.590134 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.590241 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p5wt\" (UniqueName: \"kubernetes.io/projected/0e783b59-54e1-401f-a281-b665848b7083-kube-api-access-8p5wt\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.693708 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e783b59-54e1-401f-a281-b665848b7083-log-httpd\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.694033 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.694106 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-config-data\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.694163 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.694289 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e783b59-54e1-401f-a281-b665848b7083-log-httpd\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.694351 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p5wt\" (UniqueName: \"kubernetes.io/projected/0e783b59-54e1-401f-a281-b665848b7083-kube-api-access-8p5wt\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.694425 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e783b59-54e1-401f-a281-b665848b7083-run-httpd\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.694505 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.694566 4691 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-scripts\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.697410 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0e783b59-54e1-401f-a281-b665848b7083-run-httpd\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.700651 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.701913 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.710177 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-scripts\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.712282 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.716981 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e783b59-54e1-401f-a281-b665848b7083-config-data\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.717875 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p5wt\" (UniqueName: \"kubernetes.io/projected/0e783b59-54e1-401f-a281-b665848b7083-kube-api-access-8p5wt\") pod \"ceilometer-0\" (UID: \"0e783b59-54e1-401f-a281-b665848b7083\") " pod="openstack/ceilometer-0" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.778582 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d97672fd-30ed-424c-a3fc-f3df0e0f4083" path="/var/lib/kubelet/pods/d97672fd-30ed-424c-a3fc-f3df0e0f4083/volumes" Nov 24 08:16:28 crc kubenswrapper[4691]: I1124 08:16:28.812818 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 24 08:16:29 crc kubenswrapper[4691]: I1124 08:16:29.274774 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 24 08:16:29 crc kubenswrapper[4691]: I1124 08:16:29.419807 4691 generic.go:334] "Generic (PLEG): container finished" podID="a34df957-8938-4332-9781-bad870fa9531" containerID="0c3932950cd16601648944e4aa4f993b998c36673ffdc4a799a689a4b5ab1595" exitCode=0 Nov 24 08:16:29 crc kubenswrapper[4691]: I1124 08:16:29.419880 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6qk4g" event={"ID":"a34df957-8938-4332-9781-bad870fa9531","Type":"ContainerDied","Data":"0c3932950cd16601648944e4aa4f993b998c36673ffdc4a799a689a4b5ab1595"} Nov 24 08:16:29 crc kubenswrapper[4691]: I1124 08:16:29.421906 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e783b59-54e1-401f-a281-b665848b7083","Type":"ContainerStarted","Data":"56da3abdb62f709bd984dc2a44e97144276f5d7120fc35e28452d320f566ae71"} Nov 24 08:16:30 crc kubenswrapper[4691]: I1124 08:16:30.439080 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e783b59-54e1-401f-a281-b665848b7083","Type":"ContainerStarted","Data":"f1f8b116753fe928cffe4f7e521b4c258b748d8eba250d79e81db1dd00d5cbce"} Nov 24 08:16:30 crc kubenswrapper[4691]: I1124 08:16:30.738903 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 08:16:30 crc kubenswrapper[4691]: I1124 08:16:30.738956 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 08:16:30 crc kubenswrapper[4691]: I1124 08:16:30.832731 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:30 crc kubenswrapper[4691]: I1124 08:16:30.940799 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2whj\" (UniqueName: \"kubernetes.io/projected/a34df957-8938-4332-9781-bad870fa9531-kube-api-access-x2whj\") pod \"a34df957-8938-4332-9781-bad870fa9531\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " Nov 24 08:16:30 crc kubenswrapper[4691]: I1124 08:16:30.940871 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-config-data\") pod \"a34df957-8938-4332-9781-bad870fa9531\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " Nov 24 08:16:30 crc kubenswrapper[4691]: I1124 08:16:30.940900 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-scripts\") pod \"a34df957-8938-4332-9781-bad870fa9531\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " Nov 24 08:16:30 crc kubenswrapper[4691]: I1124 08:16:30.940941 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-combined-ca-bundle\") pod \"a34df957-8938-4332-9781-bad870fa9531\" (UID: \"a34df957-8938-4332-9781-bad870fa9531\") " Nov 24 08:16:30 crc kubenswrapper[4691]: I1124 08:16:30.958146 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a34df957-8938-4332-9781-bad870fa9531-kube-api-access-x2whj" (OuterVolumeSpecName: "kube-api-access-x2whj") pod "a34df957-8938-4332-9781-bad870fa9531" (UID: "a34df957-8938-4332-9781-bad870fa9531"). InnerVolumeSpecName "kube-api-access-x2whj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:16:30 crc kubenswrapper[4691]: I1124 08:16:30.962148 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-scripts" (OuterVolumeSpecName: "scripts") pod "a34df957-8938-4332-9781-bad870fa9531" (UID: "a34df957-8938-4332-9781-bad870fa9531"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:30 crc kubenswrapper[4691]: I1124 08:16:30.988284 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-config-data" (OuterVolumeSpecName: "config-data") pod "a34df957-8938-4332-9781-bad870fa9531" (UID: "a34df957-8938-4332-9781-bad870fa9531"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.012199 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a34df957-8938-4332-9781-bad870fa9531" (UID: "a34df957-8938-4332-9781-bad870fa9531"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.042857 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.042890 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2whj\" (UniqueName: \"kubernetes.io/projected/a34df957-8938-4332-9781-bad870fa9531-kube-api-access-x2whj\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.042901 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.042910 4691 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a34df957-8938-4332-9781-bad870fa9531-scripts\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.451156 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6qk4g" event={"ID":"a34df957-8938-4332-9781-bad870fa9531","Type":"ContainerDied","Data":"91298c66817365cefd6ea8dad117f10670303e7ca13d126396686bbb3baaf03d"} Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.451214 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91298c66817365cefd6ea8dad117f10670303e7ca13d126396686bbb3baaf03d" Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.451299 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6qk4g" Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.455157 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e783b59-54e1-401f-a281-b665848b7083","Type":"ContainerStarted","Data":"21e11bfaa2c2120c1c3256359198cb9c92b0514cbbb00317de36765485398c72"} Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.654437 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.654752 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerName="nova-api-log" containerID="cri-o://a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f" gracePeriod=30 Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.654826 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerName="nova-api-api" containerID="cri-o://33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae" gracePeriod=30 Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.664025 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.206:8774/\": EOF" Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.674465 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.206:8774/\": EOF" Nov 
24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.681293 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.681777 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="730d583b-8557-4759-b3f6-f53fe9e3b73f" containerName="nova-scheduler-scheduler" containerID="cri-o://7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e" gracePeriod=30 Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.708335 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.708912 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-log" containerID="cri-o://86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e" gracePeriod=30 Nov 24 08:16:31 crc kubenswrapper[4691]: I1124 08:16:31.709053 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-metadata" containerID="cri-o://b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95" gracePeriod=30 Nov 24 08:16:32 crc kubenswrapper[4691]: I1124 08:16:32.465321 4691 generic.go:334] "Generic (PLEG): container finished" podID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerID="a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f" exitCode=143 Nov 24 08:16:32 crc kubenswrapper[4691]: I1124 08:16:32.465379 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84240b43-dd9d-4cb6-842b-e76fb1e73081","Type":"ContainerDied","Data":"a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f"} Nov 24 08:16:32 crc kubenswrapper[4691]: I1124 08:16:32.467713 4691 generic.go:334] "Generic (PLEG): container finished" podID="9549094b-f8c2-4131-89df-b6156a161466" containerID="86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e" exitCode=143 Nov 24 08:16:32 crc kubenswrapper[4691]: I1124 08:16:32.467738 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9549094b-f8c2-4131-89df-b6156a161466","Type":"ContainerDied","Data":"86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e"} Nov 24 08:16:33 crc kubenswrapper[4691]: E1124 08:16:33.126655 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e is running failed: container process not found" containerID="7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 08:16:33 crc kubenswrapper[4691]: E1124 08:16:33.127513 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e is running failed: container process not found" containerID="7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 08:16:33 crc kubenswrapper[4691]: E1124 08:16:33.127862 4691 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound 
desc = container is not created or running: checking if PID of 7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e is running failed: container process not found" containerID="7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 24 08:16:33 crc kubenswrapper[4691]: E1124 08:16:33.127935 4691 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="730d583b-8557-4759-b3f6-f53fe9e3b73f" containerName="nova-scheduler-scheduler" Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.480510 4691 generic.go:334] "Generic (PLEG): container finished" podID="730d583b-8557-4759-b3f6-f53fe9e3b73f" containerID="7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e" exitCode=0 Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.480615 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"730d583b-8557-4759-b3f6-f53fe9e3b73f","Type":"ContainerDied","Data":"7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e"} Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.480988 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"730d583b-8557-4759-b3f6-f53fe9e3b73f","Type":"ContainerDied","Data":"91f06601d94bf00f45d094cd8ed40666da131e01de217a588f0e2549a3888153"} Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.481012 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91f06601d94bf00f45d094cd8ed40666da131e01de217a588f0e2549a3888153" Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.484648 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e783b59-54e1-401f-a281-b665848b7083","Type":"ContainerStarted","Data":"5b9620b63faf2def0b641a9de376d90074c9eeb850039c1a7c451088d69fcbd5"} Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.532469 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.610711 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-config-data\") pod \"730d583b-8557-4759-b3f6-f53fe9e3b73f\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.610860 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pc9db\" (UniqueName: \"kubernetes.io/projected/730d583b-8557-4759-b3f6-f53fe9e3b73f-kube-api-access-pc9db\") pod \"730d583b-8557-4759-b3f6-f53fe9e3b73f\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.610909 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-combined-ca-bundle\") pod \"730d583b-8557-4759-b3f6-f53fe9e3b73f\" (UID: \"730d583b-8557-4759-b3f6-f53fe9e3b73f\") " Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.617230 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/730d583b-8557-4759-b3f6-f53fe9e3b73f-kube-api-access-pc9db" (OuterVolumeSpecName: "kube-api-access-pc9db") pod "730d583b-8557-4759-b3f6-f53fe9e3b73f" (UID: "730d583b-8557-4759-b3f6-f53fe9e3b73f"). InnerVolumeSpecName "kube-api-access-pc9db". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.646329 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-config-data" (OuterVolumeSpecName: "config-data") pod "730d583b-8557-4759-b3f6-f53fe9e3b73f" (UID: "730d583b-8557-4759-b3f6-f53fe9e3b73f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.646803 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "730d583b-8557-4759-b3f6-f53fe9e3b73f" (UID: "730d583b-8557-4759-b3f6-f53fe9e3b73f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.713467 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.713506 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/730d583b-8557-4759-b3f6-f53fe9e3b73f-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:33 crc kubenswrapper[4691]: I1124 08:16:33.713516 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pc9db\" (UniqueName: \"kubernetes.io/projected/730d583b-8557-4759-b3f6-f53fe9e3b73f-kube-api-access-pc9db\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.493200 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.527699 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.539062 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.548587 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:16:34 crc kubenswrapper[4691]: E1124 08:16:34.548994 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="730d583b-8557-4759-b3f6-f53fe9e3b73f" containerName="nova-scheduler-scheduler" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.549009 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="730d583b-8557-4759-b3f6-f53fe9e3b73f" containerName="nova-scheduler-scheduler" Nov 24 08:16:34 crc kubenswrapper[4691]: E1124 08:16:34.549032 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a34df957-8938-4332-9781-bad870fa9531" containerName="nova-manage" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.549039 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="a34df957-8938-4332-9781-bad870fa9531" containerName="nova-manage" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.549265 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="a34df957-8938-4332-9781-bad870fa9531" containerName="nova-manage" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.549298 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="730d583b-8557-4759-b3f6-f53fe9e3b73f" containerName="nova-scheduler-scheduler" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.549938 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.552796 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.566284 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.630280 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94098659-df1b-4792-b466-9e7a95bf19e2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"94098659-df1b-4792-b466-9e7a95bf19e2\") " pod="openstack/nova-scheduler-0" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.630564 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94098659-df1b-4792-b466-9e7a95bf19e2-config-data\") pod \"nova-scheduler-0\" (UID: \"94098659-df1b-4792-b466-9e7a95bf19e2\") " pod="openstack/nova-scheduler-0" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.630636 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwqlk\" (UniqueName: \"kubernetes.io/projected/94098659-df1b-4792-b466-9e7a95bf19e2-kube-api-access-mwqlk\") pod \"nova-scheduler-0\" (UID: \"94098659-df1b-4792-b466-9e7a95bf19e2\") " pod="openstack/nova-scheduler-0" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.732597 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94098659-df1b-4792-b466-9e7a95bf19e2-config-data\") pod \"nova-scheduler-0\" (UID: \"94098659-df1b-4792-b466-9e7a95bf19e2\") " pod="openstack/nova-scheduler-0" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.732686 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwqlk\" (UniqueName: \"kubernetes.io/projected/94098659-df1b-4792-b466-9e7a95bf19e2-kube-api-access-mwqlk\") pod \"nova-scheduler-0\" (UID: \"94098659-df1b-4792-b466-9e7a95bf19e2\") " pod="openstack/nova-scheduler-0" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.732747 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94098659-df1b-4792-b466-9e7a95bf19e2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"94098659-df1b-4792-b466-9e7a95bf19e2\") " pod="openstack/nova-scheduler-0" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.737058 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94098659-df1b-4792-b466-9e7a95bf19e2-config-data\") pod \"nova-scheduler-0\" (UID: \"94098659-df1b-4792-b466-9e7a95bf19e2\") " pod="openstack/nova-scheduler-0" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.738258 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94098659-df1b-4792-b466-9e7a95bf19e2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"94098659-df1b-4792-b466-9e7a95bf19e2\") " pod="openstack/nova-scheduler-0" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.766089 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwqlk\" (UniqueName: 
\"kubernetes.io/projected/94098659-df1b-4792-b466-9e7a95bf19e2-kube-api-access-mwqlk\") pod \"nova-scheduler-0\" (UID: \"94098659-df1b-4792-b466-9e7a95bf19e2\") " pod="openstack/nova-scheduler-0" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.773068 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="730d583b-8557-4759-b3f6-f53fe9e3b73f" path="/var/lib/kubelet/pods/730d583b-8557-4759-b3f6-f53fe9e3b73f/volumes" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.853274 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": read tcp 10.217.0.2:59094->10.217.0.198:8775: read: connection reset by peer" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.853752 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.198:8775/\": read tcp 10.217.0.2:59108->10.217.0.198:8775: read: connection reset by peer" Nov 24 08:16:34 crc kubenswrapper[4691]: I1124 08:16:34.881078 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.378764 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.445279 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-combined-ca-bundle\") pod \"9549094b-f8c2-4131-89df-b6156a161466\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.445883 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-nova-metadata-tls-certs\") pod \"9549094b-f8c2-4131-89df-b6156a161466\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.445998 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvwpr\" (UniqueName: \"kubernetes.io/projected/9549094b-f8c2-4131-89df-b6156a161466-kube-api-access-qvwpr\") pod \"9549094b-f8c2-4131-89df-b6156a161466\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.446069 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9549094b-f8c2-4131-89df-b6156a161466-logs\") pod \"9549094b-f8c2-4131-89df-b6156a161466\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.446123 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-config-data\") pod \"9549094b-f8c2-4131-89df-b6156a161466\" (UID: \"9549094b-f8c2-4131-89df-b6156a161466\") " Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.450222 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9549094b-f8c2-4131-89df-b6156a161466-logs" 
(OuterVolumeSpecName: "logs") pod "9549094b-f8c2-4131-89df-b6156a161466" (UID: "9549094b-f8c2-4131-89df-b6156a161466"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.487527 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9549094b-f8c2-4131-89df-b6156a161466-kube-api-access-qvwpr" (OuterVolumeSpecName: "kube-api-access-qvwpr") pod "9549094b-f8c2-4131-89df-b6156a161466" (UID: "9549094b-f8c2-4131-89df-b6156a161466"). InnerVolumeSpecName "kube-api-access-qvwpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.495260 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.507573 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9549094b-f8c2-4131-89df-b6156a161466" (UID: "9549094b-f8c2-4131-89df-b6156a161466"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.515686 4691 generic.go:334] "Generic (PLEG): container finished" podID="9549094b-f8c2-4131-89df-b6156a161466" containerID="b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95" exitCode=0 Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.515762 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9549094b-f8c2-4131-89df-b6156a161466","Type":"ContainerDied","Data":"b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95"} Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.515796 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9549094b-f8c2-4131-89df-b6156a161466","Type":"ContainerDied","Data":"e96be060136a60e824bb1f41525f74375b910551d26a91ed338bc3e171013f52"} Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.515816 4691 scope.go:117] "RemoveContainer" containerID="b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.516059 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.517857 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-config-data" (OuterVolumeSpecName: "config-data") pod "9549094b-f8c2-4131-89df-b6156a161466" (UID: "9549094b-f8c2-4131-89df-b6156a161466"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.519728 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"94098659-df1b-4792-b466-9e7a95bf19e2","Type":"ContainerStarted","Data":"9c72a5471afab7f136927f7a29f847f81008238ea4227b4994157c2f9a5928ec"} Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.539927 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0e783b59-54e1-401f-a281-b665848b7083","Type":"ContainerStarted","Data":"9b258b01b8ae9ae030f2ab51e882762456205bada8e220b25d80a522906c8f9b"} Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.541225 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.547727 4691 scope.go:117] "RemoveContainer" containerID="86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.548736 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.548763 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvwpr\" (UniqueName: \"kubernetes.io/projected/9549094b-f8c2-4131-89df-b6156a161466-kube-api-access-qvwpr\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.548772 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9549094b-f8c2-4131-89df-b6156a161466-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.548782 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.562250 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "9549094b-f8c2-4131-89df-b6156a161466" (UID: "9549094b-f8c2-4131-89df-b6156a161466"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.568435 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.577226398 podStartE2EDuration="7.568417841s" podCreationTimestamp="2025-11-24 08:16:28 +0000 UTC" firstStartedPulling="2025-11-24 08:16:29.289531264 +0000 UTC m=+1151.288480513" lastFinishedPulling="2025-11-24 08:16:34.280722707 +0000 UTC m=+1156.279671956" observedRunningTime="2025-11-24 08:16:35.559525144 +0000 UTC m=+1157.558474413" watchObservedRunningTime="2025-11-24 08:16:35.568417841 +0000 UTC m=+1157.567367090" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.580511 4691 scope.go:117] "RemoveContainer" containerID="b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95" Nov 24 08:16:35 crc kubenswrapper[4691]: E1124 08:16:35.581161 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95\": container with ID starting with b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95 not found: ID does not exist" containerID="b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.581221 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95"} err="failed to get container status \"b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95\": rpc error: code = NotFound desc = could not find container \"b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95\": container with ID starting with b6407f48c11b4a2e192a41df9e7df85f301a3b58f9582ed74c25bc11bca45f95 not found: ID does not exist" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.581250 4691 scope.go:117] "RemoveContainer" containerID="86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e" Nov 24 08:16:35 crc kubenswrapper[4691]: E1124 08:16:35.584734 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e\": container with ID starting with 86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e not found: ID does not exist" containerID="86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.584779 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e"} err="failed to get container status \"86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e\": rpc error: code = NotFound desc = could not find container \"86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e\": container with ID starting with 86337d96e8e976e9aa45c76b32c593544c897b90fd0766119ed4331c183b750e not found: ID does not exist" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.654806 4691 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9549094b-f8c2-4131-89df-b6156a161466-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.858160 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-metadata-0"] Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.873139 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.897621 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:16:35 crc kubenswrapper[4691]: E1124 08:16:35.898041 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-metadata" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.898056 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-metadata" Nov 24 08:16:35 crc kubenswrapper[4691]: E1124 08:16:35.898106 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-log" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.898112 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-log" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.898278 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-metadata" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.898294 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="9549094b-f8c2-4131-89df-b6156a161466" containerName="nova-metadata-log" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.899471 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.904413 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.904681 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.968286 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gv6sk\" (UniqueName: \"kubernetes.io/projected/816aeaf6-40c5-4859-b819-bcfb46750549-kube-api-access-gv6sk\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.968358 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/816aeaf6-40c5-4859-b819-bcfb46750549-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.968387 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/816aeaf6-40c5-4859-b819-bcfb46750549-config-data\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.968462 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/816aeaf6-40c5-4859-b819-bcfb46750549-logs\") pod \"nova-metadata-0\" (UID: 
\"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:35 crc kubenswrapper[4691]: I1124 08:16:35.968528 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/816aeaf6-40c5-4859-b819-bcfb46750549-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.017464 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.071629 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/816aeaf6-40c5-4859-b819-bcfb46750549-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.071705 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gv6sk\" (UniqueName: \"kubernetes.io/projected/816aeaf6-40c5-4859-b819-bcfb46750549-kube-api-access-gv6sk\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.071741 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/816aeaf6-40c5-4859-b819-bcfb46750549-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.071761 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/816aeaf6-40c5-4859-b819-bcfb46750549-config-data\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.071813 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/816aeaf6-40c5-4859-b819-bcfb46750549-logs\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.072611 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/816aeaf6-40c5-4859-b819-bcfb46750549-logs\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.085282 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/816aeaf6-40c5-4859-b819-bcfb46750549-config-data\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.085832 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/816aeaf6-40c5-4859-b819-bcfb46750549-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.086158 4691 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/816aeaf6-40c5-4859-b819-bcfb46750549-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.097976 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gv6sk\" (UniqueName: \"kubernetes.io/projected/816aeaf6-40c5-4859-b819-bcfb46750549-kube-api-access-gv6sk\") pod \"nova-metadata-0\" (UID: \"816aeaf6-40c5-4859-b819-bcfb46750549\") " pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.220064 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.569072 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"94098659-df1b-4792-b466-9e7a95bf19e2","Type":"ContainerStarted","Data":"11488bee2997289b0718cbba5f1c84bdf7f88514c50f43bd9e30c7c8e53fd9b1"} Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.585759 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.5857380450000003 podStartE2EDuration="2.585738045s" podCreationTimestamp="2025-11-24 08:16:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:16:36.584843309 +0000 UTC m=+1158.583792568" watchObservedRunningTime="2025-11-24 08:16:36.585738045 +0000 UTC m=+1158.584687294" Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.703893 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 24 08:16:36 crc kubenswrapper[4691]: W1124 08:16:36.712386 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod816aeaf6_40c5_4859_b819_bcfb46750549.slice/crio-c8c8fb80336e9486be3dc1ee0c35d2853e902ead117e6462e61bc7d309537dfa WatchSource:0}: Error finding container c8c8fb80336e9486be3dc1ee0c35d2853e902ead117e6462e61bc7d309537dfa: Status 404 returned error can't find the container with id c8c8fb80336e9486be3dc1ee0c35d2853e902ead117e6462e61bc7d309537dfa Nov 24 08:16:36 crc kubenswrapper[4691]: I1124 08:16:36.774918 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9549094b-f8c2-4131-89df-b6156a161466" path="/var/lib/kubelet/pods/9549094b-f8c2-4131-89df-b6156a161466/volumes" Nov 24 08:16:37 crc kubenswrapper[4691]: I1124 08:16:37.580477 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"816aeaf6-40c5-4859-b819-bcfb46750549","Type":"ContainerStarted","Data":"0e5b34857509352f1452d1446b936664dac1aeff28c37dd6d05dfe44a9836290"} Nov 24 08:16:37 crc kubenswrapper[4691]: I1124 08:16:37.582118 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"816aeaf6-40c5-4859-b819-bcfb46750549","Type":"ContainerStarted","Data":"239488cecf59555d7236f28c3d3e503358425e4c1ad231bb4f81cfe3947d607b"} Nov 24 08:16:37 crc kubenswrapper[4691]: I1124 08:16:37.582225 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"816aeaf6-40c5-4859-b819-bcfb46750549","Type":"ContainerStarted","Data":"c8c8fb80336e9486be3dc1ee0c35d2853e902ead117e6462e61bc7d309537dfa"} Nov 24 08:16:37 crc kubenswrapper[4691]: I1124 08:16:37.608079 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.608053512 podStartE2EDuration="2.608053512s" podCreationTimestamp="2025-11-24 08:16:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:16:37.60625688 +0000 UTC m=+1159.605206129" watchObservedRunningTime="2025-11-24 08:16:37.608053512 +0000 UTC m=+1159.607002771" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.572596 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.600330 4691 generic.go:334] "Generic (PLEG): container finished" podID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerID="33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae" exitCode=0 Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.600377 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.600497 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84240b43-dd9d-4cb6-842b-e76fb1e73081","Type":"ContainerDied","Data":"33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae"} Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.600525 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84240b43-dd9d-4cb6-842b-e76fb1e73081","Type":"ContainerDied","Data":"3d7b1f76d9d53bee81e777fda670a690f5323a1911c5ab83d0901d77ae8354d7"} Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.600561 4691 scope.go:117] "RemoveContainer" containerID="33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.633204 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j9w5g\" (UniqueName: \"kubernetes.io/projected/84240b43-dd9d-4cb6-842b-e76fb1e73081-kube-api-access-j9w5g\") pod \"84240b43-dd9d-4cb6-842b-e76fb1e73081\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.633287 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-public-tls-certs\") pod \"84240b43-dd9d-4cb6-842b-e76fb1e73081\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.634072 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84240b43-dd9d-4cb6-842b-e76fb1e73081-logs\") pod \"84240b43-dd9d-4cb6-842b-e76fb1e73081\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.634127 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-internal-tls-certs\") pod \"84240b43-dd9d-4cb6-842b-e76fb1e73081\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.634277 
4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-combined-ca-bundle\") pod \"84240b43-dd9d-4cb6-842b-e76fb1e73081\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.634340 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-config-data\") pod \"84240b43-dd9d-4cb6-842b-e76fb1e73081\" (UID: \"84240b43-dd9d-4cb6-842b-e76fb1e73081\") " Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.635116 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84240b43-dd9d-4cb6-842b-e76fb1e73081-logs" (OuterVolumeSpecName: "logs") pod "84240b43-dd9d-4cb6-842b-e76fb1e73081" (UID: "84240b43-dd9d-4cb6-842b-e76fb1e73081"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.638007 4691 scope.go:117] "RemoveContainer" containerID="a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.644918 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84240b43-dd9d-4cb6-842b-e76fb1e73081-kube-api-access-j9w5g" (OuterVolumeSpecName: "kube-api-access-j9w5g") pod "84240b43-dd9d-4cb6-842b-e76fb1e73081" (UID: "84240b43-dd9d-4cb6-842b-e76fb1e73081"). InnerVolumeSpecName "kube-api-access-j9w5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.665742 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-config-data" (OuterVolumeSpecName: "config-data") pod "84240b43-dd9d-4cb6-842b-e76fb1e73081" (UID: "84240b43-dd9d-4cb6-842b-e76fb1e73081"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.668121 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "84240b43-dd9d-4cb6-842b-e76fb1e73081" (UID: "84240b43-dd9d-4cb6-842b-e76fb1e73081"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.701799 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "84240b43-dd9d-4cb6-842b-e76fb1e73081" (UID: "84240b43-dd9d-4cb6-842b-e76fb1e73081"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.711868 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "84240b43-dd9d-4cb6-842b-e76fb1e73081" (UID: "84240b43-dd9d-4cb6-842b-e76fb1e73081"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.737018 4691 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84240b43-dd9d-4cb6-842b-e76fb1e73081-logs\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.737060 4691 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.737076 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.737086 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.737098 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j9w5g\" (UniqueName: \"kubernetes.io/projected/84240b43-dd9d-4cb6-842b-e76fb1e73081-kube-api-access-j9w5g\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.737110 4691 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/84240b43-dd9d-4cb6-842b-e76fb1e73081-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.757709 4691 scope.go:117] "RemoveContainer" containerID="33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae" Nov 24 08:16:38 crc kubenswrapper[4691]: E1124 08:16:38.758335 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae\": container with ID starting with 33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae not found: ID does not exist" containerID="33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.758388 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae"} err="failed to get container status \"33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae\": rpc error: code = NotFound desc = could not find container \"33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae\": container with ID starting with 33b42f0209f45dedf4aca480bff738bfecc7ace66e44df4f8d5b3c6881028aae not found: ID does not exist" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.758414 4691 scope.go:117] "RemoveContainer" containerID="a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f" Nov 24 08:16:38 crc kubenswrapper[4691]: E1124 08:16:38.758882 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f\": container with ID starting with a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f not found: ID does not exist" containerID="a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f" Nov 24 08:16:38 
crc kubenswrapper[4691]: I1124 08:16:38.758922 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f"} err="failed to get container status \"a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f\": rpc error: code = NotFound desc = could not find container \"a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f\": container with ID starting with a593951ee6b40c0bf650734ba7c69cfb263cebf6230aa6e17da8fb1a124b0a3f not found: ID does not exist" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.929035 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.948957 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.969175 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 24 08:16:38 crc kubenswrapper[4691]: E1124 08:16:38.969866 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerName="nova-api-api" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.969891 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerName="nova-api-api" Nov 24 08:16:38 crc kubenswrapper[4691]: E1124 08:16:38.969939 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerName="nova-api-log" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.969950 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerName="nova-api-log" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.970300 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerName="nova-api-log" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.970329 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="84240b43-dd9d-4cb6-842b-e76fb1e73081" containerName="nova-api-api" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.972097 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.978606 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.978841 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.982848 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 24 08:16:38 crc kubenswrapper[4691]: I1124 08:16:38.991603 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.042348 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbg7b\" (UniqueName: \"kubernetes.io/projected/5f883df6-eeae-475d-80e8-ef121d343ae7-kube-api-access-hbg7b\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.042428 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-config-data\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.043319 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.043402 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-public-tls-certs\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.045921 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.045985 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f883df6-eeae-475d-80e8-ef121d343ae7-logs\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.147271 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbg7b\" (UniqueName: \"kubernetes.io/projected/5f883df6-eeae-475d-80e8-ef121d343ae7-kube-api-access-hbg7b\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.147331 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-config-data\") pod 
\"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.147356 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.147392 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-public-tls-certs\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.147556 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.147581 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f883df6-eeae-475d-80e8-ef121d343ae7-logs\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.148091 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f883df6-eeae-475d-80e8-ef121d343ae7-logs\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.151602 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.151613 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-config-data\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.151839 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-public-tls-certs\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.152553 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f883df6-eeae-475d-80e8-ef121d343ae7-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.164306 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbg7b\" (UniqueName: \"kubernetes.io/projected/5f883df6-eeae-475d-80e8-ef121d343ae7-kube-api-access-hbg7b\") pod \"nova-api-0\" (UID: \"5f883df6-eeae-475d-80e8-ef121d343ae7\") " 
pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.311385 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.792472 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 24 08:16:39 crc kubenswrapper[4691]: I1124 08:16:39.881860 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 24 08:16:40 crc kubenswrapper[4691]: I1124 08:16:40.622722 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5f883df6-eeae-475d-80e8-ef121d343ae7","Type":"ContainerStarted","Data":"0058995487f9f8b29bd1d5d0e012f5810ff9dda2bcd08ce0924b68a9e804e464"} Nov 24 08:16:40 crc kubenswrapper[4691]: I1124 08:16:40.622772 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5f883df6-eeae-475d-80e8-ef121d343ae7","Type":"ContainerStarted","Data":"facb69995845ad05e0fc9a0e2fea37245040d0d4e3f8ed4b9120aab196772f0e"} Nov 24 08:16:40 crc kubenswrapper[4691]: I1124 08:16:40.622782 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5f883df6-eeae-475d-80e8-ef121d343ae7","Type":"ContainerStarted","Data":"36c40c8ec25b361ac1bf45fb05bf8de44fb2651f12efee4fa8c34dc6a177c424"} Nov 24 08:16:40 crc kubenswrapper[4691]: I1124 08:16:40.666676 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.666653144 podStartE2EDuration="2.666653144s" podCreationTimestamp="2025-11-24 08:16:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:16:40.642175525 +0000 UTC m=+1162.641124774" watchObservedRunningTime="2025-11-24 08:16:40.666653144 +0000 UTC m=+1162.665602393" Nov 24 08:16:40 crc kubenswrapper[4691]: I1124 08:16:40.775598 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84240b43-dd9d-4cb6-842b-e76fb1e73081" path="/var/lib/kubelet/pods/84240b43-dd9d-4cb6-842b-e76fb1e73081/volumes" Nov 24 08:16:41 crc kubenswrapper[4691]: I1124 08:16:41.220823 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 08:16:41 crc kubenswrapper[4691]: I1124 08:16:41.221229 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 24 08:16:44 crc kubenswrapper[4691]: I1124 08:16:44.881513 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 24 08:16:44 crc kubenswrapper[4691]: I1124 08:16:44.910499 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 24 08:16:45 crc kubenswrapper[4691]: I1124 08:16:45.705841 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 24 08:16:46 crc kubenswrapper[4691]: I1124 08:16:46.221061 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 24 08:16:46 crc kubenswrapper[4691]: I1124 08:16:46.221596 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 24 08:16:47 crc kubenswrapper[4691]: I1124 08:16:47.235799 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" 
podUID="816aeaf6-40c5-4859-b819-bcfb46750549" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 08:16:47 crc kubenswrapper[4691]: I1124 08:16:47.235828 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="816aeaf6-40c5-4859-b819-bcfb46750549" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.210:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 08:16:49 crc kubenswrapper[4691]: I1124 08:16:49.311703 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 08:16:49 crc kubenswrapper[4691]: I1124 08:16:49.312010 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 24 08:16:50 crc kubenswrapper[4691]: I1124 08:16:50.326833 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5f883df6-eeae-475d-80e8-ef121d343ae7" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.211:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 08:16:50 crc kubenswrapper[4691]: I1124 08:16:50.326822 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5f883df6-eeae-475d-80e8-ef121d343ae7" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.211:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 24 08:16:51 crc kubenswrapper[4691]: I1124 08:16:51.089875 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:16:51 crc kubenswrapper[4691]: I1124 08:16:51.089952 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:16:51 crc kubenswrapper[4691]: I1124 08:16:51.090020 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:16:51 crc kubenswrapper[4691]: I1124 08:16:51.091039 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"914d4816a735e64c132874c27ca7b7bbe33f77f07f7911089de6ac4d29c8f36b"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 08:16:51 crc kubenswrapper[4691]: I1124 08:16:51.091173 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://914d4816a735e64c132874c27ca7b7bbe33f77f07f7911089de6ac4d29c8f36b" gracePeriod=600 Nov 24 08:16:51 crc kubenswrapper[4691]: I1124 08:16:51.746097 4691 generic.go:334] "Generic (PLEG): container 
finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="914d4816a735e64c132874c27ca7b7bbe33f77f07f7911089de6ac4d29c8f36b" exitCode=0 Nov 24 08:16:51 crc kubenswrapper[4691]: I1124 08:16:51.746196 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"914d4816a735e64c132874c27ca7b7bbe33f77f07f7911089de6ac4d29c8f36b"} Nov 24 08:16:51 crc kubenswrapper[4691]: I1124 08:16:51.746531 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"e9457e7b6c4c145ad63f8bf4661b5a86c1fa3b74970f7b54d789669ee7203629"} Nov 24 08:16:51 crc kubenswrapper[4691]: I1124 08:16:51.746560 4691 scope.go:117] "RemoveContainer" containerID="8d580292dc3a8a86e61ece515d1a697fe0192e1bffaa2352b8d538c10b88fced" Nov 24 08:16:56 crc kubenswrapper[4691]: I1124 08:16:56.230480 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 08:16:56 crc kubenswrapper[4691]: I1124 08:16:56.231279 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 24 08:16:56 crc kubenswrapper[4691]: I1124 08:16:56.236745 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 08:16:56 crc kubenswrapper[4691]: I1124 08:16:56.238351 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 24 08:16:58 crc kubenswrapper[4691]: I1124 08:16:58.846277 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 24 08:16:59 crc kubenswrapper[4691]: I1124 08:16:59.320620 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 24 08:16:59 crc kubenswrapper[4691]: I1124 08:16:59.321281 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 24 08:16:59 crc kubenswrapper[4691]: I1124 08:16:59.326393 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 24 08:16:59 crc kubenswrapper[4691]: I1124 08:16:59.331254 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 24 08:16:59 crc kubenswrapper[4691]: I1124 08:16:59.849048 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 24 08:16:59 crc kubenswrapper[4691]: I1124 08:16:59.857382 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 24 08:17:08 crc kubenswrapper[4691]: I1124 08:17:08.705061 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 08:17:09 crc kubenswrapper[4691]: I1124 08:17:09.598793 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 08:17:13 crc kubenswrapper[4691]: I1124 08:17:13.941748 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="224d72d8-5d0a-48df-8930-2cb28fc1fd93" containerName="rabbitmq" containerID="cri-o://cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a" gracePeriod=604795 Nov 24 08:17:15 crc kubenswrapper[4691]: I1124 08:17:15.184876 4691 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="60038211-87c8-4170-8fd0-35df8a16aa92" containerName="rabbitmq" containerID="cri-o://5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f" gracePeriod=604795 Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.683921 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d558885bc-hhp85"] Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.687933 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.691179 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.699623 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-hhp85"] Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.796559 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.796597 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.796614 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.796932 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7t4k\" (UniqueName: \"kubernetes.io/projected/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-kube-api-access-l7t4k\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.797010 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-config\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.797059 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.797126 4691 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-svc\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.898837 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7t4k\" (UniqueName: \"kubernetes.io/projected/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-kube-api-access-l7t4k\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.898888 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-config\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.898936 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.898978 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-svc\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.899139 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.899165 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.899191 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.901040 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-svc\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.901057 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.901531 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-config\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.902068 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.902081 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.902514 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:19 crc kubenswrapper[4691]: I1124 08:17:19.919385 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7t4k\" (UniqueName: \"kubernetes.io/projected/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-kube-api-access-l7t4k\") pod \"dnsmasq-dns-d558885bc-hhp85\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") " pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.007915 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:20 crc kubenswrapper[4691]: E1124 08:17:20.288605 4691 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod224d72d8_5d0a_48df_8930_2cb28fc1fd93.slice/crio-cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a.scope\": RecentStats: unable to find data in memory cache]" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.513278 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-hhp85"] Nov 24 08:17:20 crc kubenswrapper[4691]: W1124 08:17:20.523827 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64099042_ba99_4d8a_8419_4ca6e9dd0aa4.slice/crio-7d478cb4430094acfa498540658d637fb46a4c6b04a7e7ad2b3184852aabfa39 WatchSource:0}: Error finding container 7d478cb4430094acfa498540658d637fb46a4c6b04a7e7ad2b3184852aabfa39: Status 404 returned error can't find the container with id 7d478cb4430094acfa498540658d637fb46a4c6b04a7e7ad2b3184852aabfa39 Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.582553 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.719723 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-plugins\") pod \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.720154 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-server-conf\") pod \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.720196 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-erlang-cookie\") pod \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.720235 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-tls\") pod \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.720263 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-confd\") pod \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.720321 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.720406 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/224d72d8-5d0a-48df-8930-2cb28fc1fd93-erlang-cookie-secret\") pod \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.720444 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/224d72d8-5d0a-48df-8930-2cb28fc1fd93-pod-info\") pod \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.720481 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-config-data\") pod \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.720513 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6hd2\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-kube-api-access-m6hd2\") pod \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\" (UID: 
\"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.720606 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-plugins-conf\") pod \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\" (UID: \"224d72d8-5d0a-48df-8930-2cb28fc1fd93\") " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.721575 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "224d72d8-5d0a-48df-8930-2cb28fc1fd93" (UID: "224d72d8-5d0a-48df-8930-2cb28fc1fd93"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.721936 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "224d72d8-5d0a-48df-8930-2cb28fc1fd93" (UID: "224d72d8-5d0a-48df-8930-2cb28fc1fd93"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.724157 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "224d72d8-5d0a-48df-8930-2cb28fc1fd93" (UID: "224d72d8-5d0a-48df-8930-2cb28fc1fd93"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.759963 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "224d72d8-5d0a-48df-8930-2cb28fc1fd93" (UID: "224d72d8-5d0a-48df-8930-2cb28fc1fd93"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.809484 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/224d72d8-5d0a-48df-8930-2cb28fc1fd93-pod-info" (OuterVolumeSpecName: "pod-info") pod "224d72d8-5d0a-48df-8930-2cb28fc1fd93" (UID: "224d72d8-5d0a-48df-8930-2cb28fc1fd93"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.809811 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/224d72d8-5d0a-48df-8930-2cb28fc1fd93-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "224d72d8-5d0a-48df-8930-2cb28fc1fd93" (UID: "224d72d8-5d0a-48df-8930-2cb28fc1fd93"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.809724 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-kube-api-access-m6hd2" (OuterVolumeSpecName: "kube-api-access-m6hd2") pod "224d72d8-5d0a-48df-8930-2cb28fc1fd93" (UID: "224d72d8-5d0a-48df-8930-2cb28fc1fd93"). InnerVolumeSpecName "kube-api-access-m6hd2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.809924 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "224d72d8-5d0a-48df-8930-2cb28fc1fd93" (UID: "224d72d8-5d0a-48df-8930-2cb28fc1fd93"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.822708 4691 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.822747 4691 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.822771 4691 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.822780 4691 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/224d72d8-5d0a-48df-8930-2cb28fc1fd93-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.822791 4691 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/224d72d8-5d0a-48df-8930-2cb28fc1fd93-pod-info\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.822799 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6hd2\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-kube-api-access-m6hd2\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.822807 4691 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.822816 4691 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.866532 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-config-data" (OuterVolumeSpecName: "config-data") pod "224d72d8-5d0a-48df-8930-2cb28fc1fd93" (UID: "224d72d8-5d0a-48df-8930-2cb28fc1fd93"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.871396 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-server-conf" (OuterVolumeSpecName: "server-conf") pod "224d72d8-5d0a-48df-8930-2cb28fc1fd93" (UID: "224d72d8-5d0a-48df-8930-2cb28fc1fd93"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.886026 4691 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.927636 4691 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-server-conf\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.927671 4691 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.927680 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/224d72d8-5d0a-48df-8930-2cb28fc1fd93-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:20 crc kubenswrapper[4691]: I1124 08:17:20.944737 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "224d72d8-5d0a-48df-8930-2cb28fc1fd93" (UID: "224d72d8-5d0a-48df-8930-2cb28fc1fd93"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.029982 4691 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/224d72d8-5d0a-48df-8930-2cb28fc1fd93-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.059095 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-hhp85" event={"ID":"64099042-ba99-4d8a-8419-4ca6e9dd0aa4","Type":"ContainerDied","Data":"12452fda145d8ba970192ed0ba63060699dd4541d98c6e5edccd476864dc54a6"} Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.058711 4691 generic.go:334] "Generic (PLEG): container finished" podID="64099042-ba99-4d8a-8419-4ca6e9dd0aa4" containerID="12452fda145d8ba970192ed0ba63060699dd4541d98c6e5edccd476864dc54a6" exitCode=0 Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.060045 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-hhp85" event={"ID":"64099042-ba99-4d8a-8419-4ca6e9dd0aa4","Type":"ContainerStarted","Data":"7d478cb4430094acfa498540658d637fb46a4c6b04a7e7ad2b3184852aabfa39"} Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.076607 4691 generic.go:334] "Generic (PLEG): container finished" podID="224d72d8-5d0a-48df-8930-2cb28fc1fd93" containerID="cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a" exitCode=0 Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.076681 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"224d72d8-5d0a-48df-8930-2cb28fc1fd93","Type":"ContainerDied","Data":"cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a"} Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.076726 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"224d72d8-5d0a-48df-8930-2cb28fc1fd93","Type":"ContainerDied","Data":"9170c4b9b023350c74d9e3bb8519cf8438cf56de5a0e0183f2c87210001de793"} Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 
08:17:21.076777 4691 scope.go:117] "RemoveContainer" containerID="cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.077038 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.251721 4691 scope.go:117] "RemoveContainer" containerID="b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.369961 4691 scope.go:117] "RemoveContainer" containerID="cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a" Nov 24 08:17:21 crc kubenswrapper[4691]: E1124 08:17:21.370885 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a\": container with ID starting with cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a not found: ID does not exist" containerID="cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.370919 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a"} err="failed to get container status \"cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a\": rpc error: code = NotFound desc = could not find container \"cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a\": container with ID starting with cecf5374f4f9703c942989740b54a5434cff6537997e0fb2b48d18ffe7aa244a not found: ID does not exist" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.370940 4691 scope.go:117] "RemoveContainer" containerID="b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283" Nov 24 08:17:21 crc kubenswrapper[4691]: E1124 08:17:21.371362 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283\": container with ID starting with b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283 not found: ID does not exist" containerID="b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.371388 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283"} err="failed to get container status \"b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283\": rpc error: code = NotFound desc = could not find container \"b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283\": container with ID starting with b2ba845015f693518c179a10b585e65cf75a15ce74c8f20767c76c2d6e35d283 not found: ID does not exist" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.396100 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.426073 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.434868 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 08:17:21 crc kubenswrapper[4691]: E1124 08:17:21.435621 4691 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="224d72d8-5d0a-48df-8930-2cb28fc1fd93" containerName="setup-container" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.435648 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="224d72d8-5d0a-48df-8930-2cb28fc1fd93" containerName="setup-container" Nov 24 08:17:21 crc kubenswrapper[4691]: E1124 08:17:21.435668 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="224d72d8-5d0a-48df-8930-2cb28fc1fd93" containerName="rabbitmq" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.435676 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="224d72d8-5d0a-48df-8930-2cb28fc1fd93" containerName="rabbitmq" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.436072 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="224d72d8-5d0a-48df-8930-2cb28fc1fd93" containerName="rabbitmq" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.437591 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.441150 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.443086 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-9tm7h" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.443346 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.444463 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.444744 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.444873 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.445012 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.448786 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.545732 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.545800 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/19b40ace-19bb-41b3-8b25-f93691331766-pod-info\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.545826 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/19b40ace-19bb-41b3-8b25-f93691331766-server-conf\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.545848 
4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qm4x\" (UniqueName: \"kubernetes.io/projected/19b40ace-19bb-41b3-8b25-f93691331766-kube-api-access-5qm4x\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.546046 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.546229 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.546255 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/19b40ace-19bb-41b3-8b25-f93691331766-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.546282 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.546299 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/19b40ace-19bb-41b3-8b25-f93691331766-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.546360 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/19b40ace-19bb-41b3-8b25-f93691331766-config-data\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.546486 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.656423 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/19b40ace-19bb-41b3-8b25-f93691331766-config-data\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.656536 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.656569 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.656597 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/19b40ace-19bb-41b3-8b25-f93691331766-pod-info\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.656624 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/19b40ace-19bb-41b3-8b25-f93691331766-server-conf\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.656648 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qm4x\" (UniqueName: \"kubernetes.io/projected/19b40ace-19bb-41b3-8b25-f93691331766-kube-api-access-5qm4x\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.656694 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.656757 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.656774 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/19b40ace-19bb-41b3-8b25-f93691331766-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.656790 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.656807 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/19b40ace-19bb-41b3-8b25-f93691331766-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc 
kubenswrapper[4691]: I1124 08:17:21.658374 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.659047 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/19b40ace-19bb-41b3-8b25-f93691331766-server-conf\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.659357 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.659691 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/19b40ace-19bb-41b3-8b25-f93691331766-config-data\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.664024 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/19b40ace-19bb-41b3-8b25-f93691331766-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.664309 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.665250 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/19b40ace-19bb-41b3-8b25-f93691331766-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.666984 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.672775 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/19b40ace-19bb-41b3-8b25-f93691331766-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.673185 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/19b40ace-19bb-41b3-8b25-f93691331766-pod-info\") pod \"rabbitmq-server-0\" (UID: 
\"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.678757 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qm4x\" (UniqueName: \"kubernetes.io/projected/19b40ace-19bb-41b3-8b25-f93691331766-kube-api-access-5qm4x\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.691779 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.703109 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"19b40ace-19bb-41b3-8b25-f93691331766\") " pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.773209 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.860306 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-plugins-conf\") pod \"60038211-87c8-4170-8fd0-35df8a16aa92\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.860439 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-tls\") pod \"60038211-87c8-4170-8fd0-35df8a16aa92\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.860585 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fbpv5\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-kube-api-access-fbpv5\") pod \"60038211-87c8-4170-8fd0-35df8a16aa92\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.860617 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-confd\") pod \"60038211-87c8-4170-8fd0-35df8a16aa92\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.860654 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-erlang-cookie\") pod \"60038211-87c8-4170-8fd0-35df8a16aa92\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.860702 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-config-data\") pod \"60038211-87c8-4170-8fd0-35df8a16aa92\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.860729 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/60038211-87c8-4170-8fd0-35df8a16aa92-pod-info\") pod 
\"60038211-87c8-4170-8fd0-35df8a16aa92\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.860798 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/60038211-87c8-4170-8fd0-35df8a16aa92-erlang-cookie-secret\") pod \"60038211-87c8-4170-8fd0-35df8a16aa92\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.860853 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-server-conf\") pod \"60038211-87c8-4170-8fd0-35df8a16aa92\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.860879 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"60038211-87c8-4170-8fd0-35df8a16aa92\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.860925 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-plugins\") pod \"60038211-87c8-4170-8fd0-35df8a16aa92\" (UID: \"60038211-87c8-4170-8fd0-35df8a16aa92\") " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.863156 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "60038211-87c8-4170-8fd0-35df8a16aa92" (UID: "60038211-87c8-4170-8fd0-35df8a16aa92"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.863816 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "60038211-87c8-4170-8fd0-35df8a16aa92" (UID: "60038211-87c8-4170-8fd0-35df8a16aa92"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.864482 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "60038211-87c8-4170-8fd0-35df8a16aa92" (UID: "60038211-87c8-4170-8fd0-35df8a16aa92"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.869381 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "60038211-87c8-4170-8fd0-35df8a16aa92" (UID: "60038211-87c8-4170-8fd0-35df8a16aa92"). InnerVolumeSpecName "local-storage01-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.876741 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-kube-api-access-fbpv5" (OuterVolumeSpecName: "kube-api-access-fbpv5") pod "60038211-87c8-4170-8fd0-35df8a16aa92" (UID: "60038211-87c8-4170-8fd0-35df8a16aa92"). InnerVolumeSpecName "kube-api-access-fbpv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.882651 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/60038211-87c8-4170-8fd0-35df8a16aa92-pod-info" (OuterVolumeSpecName: "pod-info") pod "60038211-87c8-4170-8fd0-35df8a16aa92" (UID: "60038211-87c8-4170-8fd0-35df8a16aa92"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.893086 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60038211-87c8-4170-8fd0-35df8a16aa92-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "60038211-87c8-4170-8fd0-35df8a16aa92" (UID: "60038211-87c8-4170-8fd0-35df8a16aa92"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.913974 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "60038211-87c8-4170-8fd0-35df8a16aa92" (UID: "60038211-87c8-4170-8fd0-35df8a16aa92"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.949408 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-server-conf" (OuterVolumeSpecName: "server-conf") pod "60038211-87c8-4170-8fd0-35df8a16aa92" (UID: "60038211-87c8-4170-8fd0-35df8a16aa92"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.954100 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-config-data" (OuterVolumeSpecName: "config-data") pod "60038211-87c8-4170-8fd0-35df8a16aa92" (UID: "60038211-87c8-4170-8fd0-35df8a16aa92"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.967879 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fbpv5\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-kube-api-access-fbpv5\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.967913 4691 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.967924 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.967936 4691 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/60038211-87c8-4170-8fd0-35df8a16aa92-pod-info\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.967945 4691 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/60038211-87c8-4170-8fd0-35df8a16aa92-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.967953 4691 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-server-conf\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.969709 4691 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.969797 4691 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.969809 4691 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/60038211-87c8-4170-8fd0-35df8a16aa92-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:21 crc kubenswrapper[4691]: I1124 08:17:21.969819 4691 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.014731 4691 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.036904 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "60038211-87c8-4170-8fd0-35df8a16aa92" (UID: "60038211-87c8-4170-8fd0-35df8a16aa92"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.072000 4691 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.072027 4691 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/60038211-87c8-4170-8fd0-35df8a16aa92-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.098413 4691 generic.go:334] "Generic (PLEG): container finished" podID="60038211-87c8-4170-8fd0-35df8a16aa92" containerID="5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f" exitCode=0 Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.098500 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"60038211-87c8-4170-8fd0-35df8a16aa92","Type":"ContainerDied","Data":"5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f"} Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.098526 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"60038211-87c8-4170-8fd0-35df8a16aa92","Type":"ContainerDied","Data":"3ff9ab7db9deb0e0331e70d165b6281d4565ece69c5f21954e24f4dec35406b2"} Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.098565 4691 scope.go:117] "RemoveContainer" containerID="5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.098709 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.117143 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-hhp85" event={"ID":"64099042-ba99-4d8a-8419-4ca6e9dd0aa4","Type":"ContainerStarted","Data":"36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d"} Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.117359 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d558885bc-hhp85" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.136958 4691 scope.go:117] "RemoveContainer" containerID="24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.144521 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-d558885bc-hhp85" podStartSLOduration=3.144501385 podStartE2EDuration="3.144501385s" podCreationTimestamp="2025-11-24 08:17:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:17:22.133461885 +0000 UTC m=+1204.132411134" watchObservedRunningTime="2025-11-24 08:17:22.144501385 +0000 UTC m=+1204.143450644" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.160792 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.188657 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.196576 4691 scope.go:117] "RemoveContainer" containerID="5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f" Nov 24 08:17:22 
crc kubenswrapper[4691]: E1124 08:17:22.197058 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f\": container with ID starting with 5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f not found: ID does not exist" containerID="5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.197102 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f"} err="failed to get container status \"5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f\": rpc error: code = NotFound desc = could not find container \"5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f\": container with ID starting with 5484e06182bf74af6a6cbfcea515b199c54cae93a2775659f4f489265709e66f not found: ID does not exist" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.197130 4691 scope.go:117] "RemoveContainer" containerID="24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785" Nov 24 08:17:22 crc kubenswrapper[4691]: E1124 08:17:22.197412 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785\": container with ID starting with 24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785 not found: ID does not exist" containerID="24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.197484 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785"} err="failed to get container status \"24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785\": rpc error: code = NotFound desc = could not find container \"24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785\": container with ID starting with 24e84f0b81dd6e2ede17b256a9363d2077a55960ca9ab99278dcc179fc0c7785 not found: ID does not exist" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.213563 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 08:17:22 crc kubenswrapper[4691]: E1124 08:17:22.214172 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60038211-87c8-4170-8fd0-35df8a16aa92" containerName="setup-container" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.214195 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="60038211-87c8-4170-8fd0-35df8a16aa92" containerName="setup-container" Nov 24 08:17:22 crc kubenswrapper[4691]: E1124 08:17:22.214220 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60038211-87c8-4170-8fd0-35df8a16aa92" containerName="rabbitmq" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.214229 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="60038211-87c8-4170-8fd0-35df8a16aa92" containerName="rabbitmq" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.214476 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="60038211-87c8-4170-8fd0-35df8a16aa92" containerName="rabbitmq" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.215905 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.218174 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.218442 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-6lbs8" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.218658 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.218840 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.221630 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.221954 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.222638 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.229784 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.299300 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.381925 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.381996 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.382079 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.382120 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.382160 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 
24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.382183 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.382258 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.382305 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.382328 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4hwf\" (UniqueName: \"kubernetes.io/projected/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-kube-api-access-j4hwf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.382376 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.382400 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.485124 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.485267 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.485306 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4hwf\" (UniqueName: \"kubernetes.io/projected/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-kube-api-access-j4hwf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0" 
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.485391 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.485435 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.486010 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.486276 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.486509 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.486589 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.487325 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.487529 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.487587 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.487648 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.487711 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.487867 4691 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.488961 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.489102 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.491799 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.492798 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.493232 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.501293 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.507613 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4hwf\" (UniqueName: \"kubernetes.io/projected/4e65164c-c11a-4774-808c-f0dbdf7f9ffa-kube-api-access-j4hwf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.524680 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4e65164c-c11a-4774-808c-f0dbdf7f9ffa\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.538364 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.772613 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="224d72d8-5d0a-48df-8930-2cb28fc1fd93" path="/var/lib/kubelet/pods/224d72d8-5d0a-48df-8930-2cb28fc1fd93/volumes"
Nov 24 08:17:22 crc kubenswrapper[4691]: I1124 08:17:22.773964 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60038211-87c8-4170-8fd0-35df8a16aa92" path="/var/lib/kubelet/pods/60038211-87c8-4170-8fd0-35df8a16aa92/volumes"
Nov 24 08:17:23 crc kubenswrapper[4691]: I1124 08:17:23.011151 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 24 08:17:23 crc kubenswrapper[4691]: W1124 08:17:23.019360 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e65164c_c11a_4774_808c_f0dbdf7f9ffa.slice/crio-c72596f5a448e67f5bfb97b9a7a2b4bedbdf7a948548c99e3ebcd9d6b844b287 WatchSource:0}: Error finding container c72596f5a448e67f5bfb97b9a7a2b4bedbdf7a948548c99e3ebcd9d6b844b287: Status 404 returned error can't find the container with id c72596f5a448e67f5bfb97b9a7a2b4bedbdf7a948548c99e3ebcd9d6b844b287
Nov 24 08:17:23 crc kubenswrapper[4691]: I1124 08:17:23.134017 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"19b40ace-19bb-41b3-8b25-f93691331766","Type":"ContainerStarted","Data":"745979e5ebdfdb9f46233345dc2b7f97590e5d176ce89fd5dc39ca6a099dd4dd"}
Nov 24 08:17:23 crc kubenswrapper[4691]: I1124 08:17:23.138477 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4e65164c-c11a-4774-808c-f0dbdf7f9ffa","Type":"ContainerStarted","Data":"c72596f5a448e67f5bfb97b9a7a2b4bedbdf7a948548c99e3ebcd9d6b844b287"}
Nov 24 08:17:24 crc kubenswrapper[4691]: I1124 08:17:24.149547 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"19b40ace-19bb-41b3-8b25-f93691331766","Type":"ContainerStarted","Data":"5b451eabb3321ad4f5da5172263e3b2a1df211d98a0e4622266cc4b2d4bde1b2"}
Nov 24 08:17:25 crc kubenswrapper[4691]: I1124 08:17:25.161354 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4e65164c-c11a-4774-808c-f0dbdf7f9ffa","Type":"ContainerStarted","Data":"0f46aa79573ce238fff8d4afd4fea2abc992000f6ccf57941b63b8e6e621f456"}
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.010606 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-d558885bc-hhp85"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.077178 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"]
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.077437 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l" podUID="dd8fb0be-4983-4cd1-b412-0c170edb6565" containerName="dnsmasq-dns" containerID="cri-o://a7d9fa456c5dcccd50a8c3e8c704c957e6c0ee57868d18e70db5f64d25439421" gracePeriod=10
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.233282 4691 generic.go:334] "Generic (PLEG): container finished" podID="dd8fb0be-4983-4cd1-b412-0c170edb6565" containerID="a7d9fa456c5dcccd50a8c3e8c704c957e6c0ee57868d18e70db5f64d25439421" exitCode=0
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.233325 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l" event={"ID":"dd8fb0be-4983-4cd1-b412-0c170edb6565","Type":"ContainerDied","Data":"a7d9fa456c5dcccd50a8c3e8c704c957e6c0ee57868d18e70db5f64d25439421"}
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.240521 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-gx44c"]
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.242327 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.250754 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-gx44c"]
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.364409 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzwsn\" (UniqueName: \"kubernetes.io/projected/2f860729-9ea4-4236-9465-68ac2164ac5c-kube-api-access-qzwsn\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.364813 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-config\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.364872 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.364918 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.364957 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.364994 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.365045 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.466342 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.466429 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.466475 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzwsn\" (UniqueName: \"kubernetes.io/projected/2f860729-9ea4-4236-9465-68ac2164ac5c-kube-api-access-qzwsn\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.466545 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-config\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.466588 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.466616 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.466641 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.467215 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.467597 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.467945 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.468203 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.468703 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-config\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.469338 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/2f860729-9ea4-4236-9465-68ac2164ac5c-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.503620 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzwsn\" (UniqueName: \"kubernetes.io/projected/2f860729-9ea4-4236-9465-68ac2164ac5c-kube-api-access-qzwsn\") pod \"dnsmasq-dns-78c64bc9c5-gx44c\" (UID: \"2f860729-9ea4-4236-9465-68ac2164ac5c\") " pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.562350 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.682243 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.772482 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-sb\") pod \"dd8fb0be-4983-4cd1-b412-0c170edb6565\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") "
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.772631 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-svc\") pod \"dd8fb0be-4983-4cd1-b412-0c170edb6565\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") "
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.772747 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-config\") pod \"dd8fb0be-4983-4cd1-b412-0c170edb6565\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") "
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.772856 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pq8d8\" (UniqueName: \"kubernetes.io/projected/dd8fb0be-4983-4cd1-b412-0c170edb6565-kube-api-access-pq8d8\") pod \"dd8fb0be-4983-4cd1-b412-0c170edb6565\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") "
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.772915 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-swift-storage-0\") pod \"dd8fb0be-4983-4cd1-b412-0c170edb6565\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") "
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.772942 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-nb\") pod \"dd8fb0be-4983-4cd1-b412-0c170edb6565\" (UID: \"dd8fb0be-4983-4cd1-b412-0c170edb6565\") "
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.778652 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd8fb0be-4983-4cd1-b412-0c170edb6565-kube-api-access-pq8d8" (OuterVolumeSpecName: "kube-api-access-pq8d8") pod "dd8fb0be-4983-4cd1-b412-0c170edb6565" (UID: "dd8fb0be-4983-4cd1-b412-0c170edb6565"). InnerVolumeSpecName "kube-api-access-pq8d8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.824674 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dd8fb0be-4983-4cd1-b412-0c170edb6565" (UID: "dd8fb0be-4983-4cd1-b412-0c170edb6565"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.825511 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dd8fb0be-4983-4cd1-b412-0c170edb6565" (UID: "dd8fb0be-4983-4cd1-b412-0c170edb6565"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.828737 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "dd8fb0be-4983-4cd1-b412-0c170edb6565" (UID: "dd8fb0be-4983-4cd1-b412-0c170edb6565"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.840829 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dd8fb0be-4983-4cd1-b412-0c170edb6565" (UID: "dd8fb0be-4983-4cd1-b412-0c170edb6565"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.842458 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-config" (OuterVolumeSpecName: "config") pod "dd8fb0be-4983-4cd1-b412-0c170edb6565" (UID: "dd8fb0be-4983-4cd1-b412-0c170edb6565"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.875866 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pq8d8\" (UniqueName: \"kubernetes.io/projected/dd8fb0be-4983-4cd1-b412-0c170edb6565-kube-api-access-pq8d8\") on node \"crc\" DevicePath \"\""
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.875911 4691 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.875924 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.875941 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.875953 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 24 08:17:30 crc kubenswrapper[4691]: I1124 08:17:30.875965 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd8fb0be-4983-4cd1-b412-0c170edb6565-config\") on node \"crc\" DevicePath \"\""
Nov 24 08:17:31 crc kubenswrapper[4691]: I1124 08:17:31.066480 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-gx44c"]
Nov 24 08:17:31 crc kubenswrapper[4691]: I1124 08:17:31.243320 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c" event={"ID":"2f860729-9ea4-4236-9465-68ac2164ac5c","Type":"ContainerStarted","Data":"cd2131d29caaba14347909ec603b240dc9713e23001927e8b8f7291996d9308b"}
Nov 24 08:17:31 crc kubenswrapper[4691]: I1124 08:17:31.245994 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l" event={"ID":"dd8fb0be-4983-4cd1-b412-0c170edb6565","Type":"ContainerDied","Data":"e6bc689f302bb71988ec06b0453a5540aba74104613b448dd8a95a7a1115c4e7"}
Nov 24 08:17:31 crc kubenswrapper[4691]: I1124 08:17:31.246036 4691 scope.go:117] "RemoveContainer" containerID="a7d9fa456c5dcccd50a8c3e8c704c957e6c0ee57868d18e70db5f64d25439421"
Nov 24 08:17:31 crc kubenswrapper[4691]: I1124 08:17:31.246212 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"
Nov 24 08:17:31 crc kubenswrapper[4691]: I1124 08:17:31.273668 4691 scope.go:117] "RemoveContainer" containerID="b3e97fa35c97c05c5a6724781a977c2e2c401a39bf1b847fc1c9748377c2f106"
Nov 24 08:17:31 crc kubenswrapper[4691]: I1124 08:17:31.305974 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"]
Nov 24 08:17:31 crc kubenswrapper[4691]: I1124 08:17:31.318105 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-pqq4l"]
Nov 24 08:17:32 crc kubenswrapper[4691]: I1124 08:17:32.259090 4691 generic.go:334] "Generic (PLEG): container finished" podID="2f860729-9ea4-4236-9465-68ac2164ac5c" containerID="a50c891022e3eb020bdf59ac315742b639099f837f889802849435dbaf1b89c9" exitCode=0
Nov 24 08:17:32 crc kubenswrapper[4691]: I1124 08:17:32.259150 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c" event={"ID":"2f860729-9ea4-4236-9465-68ac2164ac5c","Type":"ContainerDied","Data":"a50c891022e3eb020bdf59ac315742b639099f837f889802849435dbaf1b89c9"}
Nov 24 08:17:32 crc kubenswrapper[4691]: I1124 08:17:32.774921 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd8fb0be-4983-4cd1-b412-0c170edb6565" path="/var/lib/kubelet/pods/dd8fb0be-4983-4cd1-b412-0c170edb6565/volumes"
Nov 24 08:17:33 crc kubenswrapper[4691]: I1124 08:17:33.277507 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c" event={"ID":"2f860729-9ea4-4236-9465-68ac2164ac5c","Type":"ContainerStarted","Data":"42cb3f1683da4e067ea53a3562336917cd9935b3f29c4c9fb32336fb6615a4bc"}
Nov 24 08:17:33 crc kubenswrapper[4691]: I1124 08:17:33.277756 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:33 crc kubenswrapper[4691]: I1124 08:17:33.305699 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c" podStartSLOduration=3.305679792 podStartE2EDuration="3.305679792s" podCreationTimestamp="2025-11-24 08:17:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:17:33.299711259 +0000 UTC m=+1215.298660508" watchObservedRunningTime="2025-11-24 08:17:33.305679792 +0000 UTC m=+1215.304629041"
Nov 24 08:17:40 crc kubenswrapper[4691]: I1124 08:17:40.564803 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78c64bc9c5-gx44c"
Nov 24 08:17:40 crc kubenswrapper[4691]: I1124 08:17:40.653981 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-hhp85"]
Nov 24 08:17:40 crc kubenswrapper[4691]: I1124 08:17:40.654315 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-d558885bc-hhp85" podUID="64099042-ba99-4d8a-8419-4ca6e9dd0aa4" containerName="dnsmasq-dns" containerID="cri-o://36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d" gracePeriod=10
Nov 24 08:17:40 crc kubenswrapper[4691]: E1124 08:17:40.935978 4691 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64099042_ba99_4d8a_8419_4ca6e9dd0aa4.slice/crio-conmon-36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64099042_ba99_4d8a_8419_4ca6e9dd0aa4.slice/crio-36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d.scope\": RecentStats: unable to find data in memory cache]"
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.264321 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-hhp85"
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.323787 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-sb\") pod \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") "
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.324105 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-config\") pod \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") "
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.324213 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-nb\") pod \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") "
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.324320 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7t4k\" (UniqueName: \"kubernetes.io/projected/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-kube-api-access-l7t4k\") pod \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") "
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.324474 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-swift-storage-0\") pod \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") "
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.324725 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-openstack-edpm-ipam\") pod \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") "
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.324956 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-svc\") pod \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\" (UID: \"64099042-ba99-4d8a-8419-4ca6e9dd0aa4\") "
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.331738 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-kube-api-access-l7t4k" (OuterVolumeSpecName: "kube-api-access-l7t4k") pod "64099042-ba99-4d8a-8419-4ca6e9dd0aa4" (UID: "64099042-ba99-4d8a-8419-4ca6e9dd0aa4"). InnerVolumeSpecName "kube-api-access-l7t4k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.387041 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-config" (OuterVolumeSpecName: "config") pod "64099042-ba99-4d8a-8419-4ca6e9dd0aa4" (UID: "64099042-ba99-4d8a-8419-4ca6e9dd0aa4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.387156 4691 generic.go:334] "Generic (PLEG): container finished" podID="64099042-ba99-4d8a-8419-4ca6e9dd0aa4" containerID="36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d" exitCode=0
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.387213 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-hhp85"
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.387229 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-hhp85" event={"ID":"64099042-ba99-4d8a-8419-4ca6e9dd0aa4","Type":"ContainerDied","Data":"36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d"}
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.387296 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d558885bc-hhp85" event={"ID":"64099042-ba99-4d8a-8419-4ca6e9dd0aa4","Type":"ContainerDied","Data":"7d478cb4430094acfa498540658d637fb46a4c6b04a7e7ad2b3184852aabfa39"}
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.387329 4691 scope.go:117] "RemoveContainer" containerID="36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d"
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.393654 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "64099042-ba99-4d8a-8419-4ca6e9dd0aa4" (UID: "64099042-ba99-4d8a-8419-4ca6e9dd0aa4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.403587 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "64099042-ba99-4d8a-8419-4ca6e9dd0aa4" (UID: "64099042-ba99-4d8a-8419-4ca6e9dd0aa4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.412567 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "64099042-ba99-4d8a-8419-4ca6e9dd0aa4" (UID: "64099042-ba99-4d8a-8419-4ca6e9dd0aa4"). InnerVolumeSpecName "openstack-edpm-ipam".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.421662 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "64099042-ba99-4d8a-8419-4ca6e9dd0aa4" (UID: "64099042-ba99-4d8a-8419-4ca6e9dd0aa4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.428958 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.428983 4691 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-config\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.429012 4691 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.429022 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7t4k\" (UniqueName: \"kubernetes.io/projected/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-kube-api-access-l7t4k\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.429031 4691 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.429039 4691 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.446305 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "64099042-ba99-4d8a-8419-4ca6e9dd0aa4" (UID: "64099042-ba99-4d8a-8419-4ca6e9dd0aa4"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.457480 4691 scope.go:117] "RemoveContainer" containerID="12452fda145d8ba970192ed0ba63060699dd4541d98c6e5edccd476864dc54a6" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.484656 4691 scope.go:117] "RemoveContainer" containerID="36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d" Nov 24 08:17:41 crc kubenswrapper[4691]: E1124 08:17:41.485247 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d\": container with ID starting with 36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d not found: ID does not exist" containerID="36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.485297 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d"} err="failed to get container status \"36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d\": rpc error: code = NotFound desc = could not find container \"36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d\": container with ID starting with 36d325279ceddbd0c0fcd2824cb9ffc3cf2ed79139616375967676504ec5c48d not found: ID does not exist" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.485327 4691 scope.go:117] "RemoveContainer" containerID="12452fda145d8ba970192ed0ba63060699dd4541d98c6e5edccd476864dc54a6" Nov 24 08:17:41 crc kubenswrapper[4691]: E1124 08:17:41.485862 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12452fda145d8ba970192ed0ba63060699dd4541d98c6e5edccd476864dc54a6\": container with ID starting with 12452fda145d8ba970192ed0ba63060699dd4541d98c6e5edccd476864dc54a6 not found: ID does not exist" containerID="12452fda145d8ba970192ed0ba63060699dd4541d98c6e5edccd476864dc54a6" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.485951 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12452fda145d8ba970192ed0ba63060699dd4541d98c6e5edccd476864dc54a6"} err="failed to get container status \"12452fda145d8ba970192ed0ba63060699dd4541d98c6e5edccd476864dc54a6\": rpc error: code = NotFound desc = could not find container \"12452fda145d8ba970192ed0ba63060699dd4541d98c6e5edccd476864dc54a6\": container with ID starting with 12452fda145d8ba970192ed0ba63060699dd4541d98c6e5edccd476864dc54a6 not found: ID does not exist" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.530890 4691 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/64099042-ba99-4d8a-8419-4ca6e9dd0aa4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.721651 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-hhp85"] Nov 24 08:17:41 crc kubenswrapper[4691]: I1124 08:17:41.729826 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-hhp85"] Nov 24 08:17:42 crc kubenswrapper[4691]: I1124 08:17:42.775778 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64099042-ba99-4d8a-8419-4ca6e9dd0aa4" path="/var/lib/kubelet/pods/64099042-ba99-4d8a-8419-4ca6e9dd0aa4/volumes" Nov 24 08:17:54 
crc kubenswrapper[4691]: I1124 08:17:54.003042 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd"] Nov 24 08:17:54 crc kubenswrapper[4691]: E1124 08:17:54.004217 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64099042-ba99-4d8a-8419-4ca6e9dd0aa4" containerName="dnsmasq-dns" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.004241 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="64099042-ba99-4d8a-8419-4ca6e9dd0aa4" containerName="dnsmasq-dns" Nov 24 08:17:54 crc kubenswrapper[4691]: E1124 08:17:54.004262 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd8fb0be-4983-4cd1-b412-0c170edb6565" containerName="init" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.004272 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd8fb0be-4983-4cd1-b412-0c170edb6565" containerName="init" Nov 24 08:17:54 crc kubenswrapper[4691]: E1124 08:17:54.004322 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64099042-ba99-4d8a-8419-4ca6e9dd0aa4" containerName="init" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.004333 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="64099042-ba99-4d8a-8419-4ca6e9dd0aa4" containerName="init" Nov 24 08:17:54 crc kubenswrapper[4691]: E1124 08:17:54.004355 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd8fb0be-4983-4cd1-b412-0c170edb6565" containerName="dnsmasq-dns" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.004366 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd8fb0be-4983-4cd1-b412-0c170edb6565" containerName="dnsmasq-dns" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.004719 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd8fb0be-4983-4cd1-b412-0c170edb6565" containerName="dnsmasq-dns" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.004750 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="64099042-ba99-4d8a-8419-4ca6e9dd0aa4" containerName="dnsmasq-dns" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.005811 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.012668 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.012994 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.013017 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.013115 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.031134 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd"] Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.084939 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.084995 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.085073 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nv6c7\" (UniqueName: \"kubernetes.io/projected/7b0cd66f-4531-45fd-aea8-00726f118662-kube-api-access-nv6c7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.085110 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.187692 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.187756 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-repo-setup-combined-ca-bundle\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.187810 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nv6c7\" (UniqueName: \"kubernetes.io/projected/7b0cd66f-4531-45fd-aea8-00726f118662-kube-api-access-nv6c7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.187860 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.193736 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.195558 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.201203 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.212998 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nv6c7\" (UniqueName: \"kubernetes.io/projected/7b0cd66f-4531-45fd-aea8-00726f118662-kube-api-access-nv6c7\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.334065 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.885116 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd"] Nov 24 08:17:54 crc kubenswrapper[4691]: W1124 08:17:54.889146 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b0cd66f_4531_45fd_aea8_00726f118662.slice/crio-5fb8c0a9ad9417cdfe4e98f91a5e8233c00bbb027f474cc85c1f7d1e8074ea74 WatchSource:0}: Error finding container 5fb8c0a9ad9417cdfe4e98f91a5e8233c00bbb027f474cc85c1f7d1e8074ea74: Status 404 returned error can't find the container with id 5fb8c0a9ad9417cdfe4e98f91a5e8233c00bbb027f474cc85c1f7d1e8074ea74 Nov 24 08:17:54 crc kubenswrapper[4691]: I1124 08:17:54.892543 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 08:17:55 crc kubenswrapper[4691]: I1124 08:17:55.526880 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" event={"ID":"7b0cd66f-4531-45fd-aea8-00726f118662","Type":"ContainerStarted","Data":"5fb8c0a9ad9417cdfe4e98f91a5e8233c00bbb027f474cc85c1f7d1e8074ea74"} Nov 24 08:17:56 crc kubenswrapper[4691]: I1124 08:17:56.543352 4691 generic.go:334] "Generic (PLEG): container finished" podID="19b40ace-19bb-41b3-8b25-f93691331766" containerID="5b451eabb3321ad4f5da5172263e3b2a1df211d98a0e4622266cc4b2d4bde1b2" exitCode=0 Nov 24 08:17:56 crc kubenswrapper[4691]: I1124 08:17:56.543417 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"19b40ace-19bb-41b3-8b25-f93691331766","Type":"ContainerDied","Data":"5b451eabb3321ad4f5da5172263e3b2a1df211d98a0e4622266cc4b2d4bde1b2"} Nov 24 08:17:58 crc kubenswrapper[4691]: I1124 08:17:58.578824 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"19b40ace-19bb-41b3-8b25-f93691331766","Type":"ContainerStarted","Data":"df16cf575935495ec82e67320934d87c6fae19a196e4c04f05951ebd2b64eb7a"} Nov 24 08:17:58 crc kubenswrapper[4691]: I1124 08:17:58.579736 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 24 08:17:58 crc kubenswrapper[4691]: I1124 08:17:58.582894 4691 generic.go:334] "Generic (PLEG): container finished" podID="4e65164c-c11a-4774-808c-f0dbdf7f9ffa" containerID="0f46aa79573ce238fff8d4afd4fea2abc992000f6ccf57941b63b8e6e621f456" exitCode=0 Nov 24 08:17:58 crc kubenswrapper[4691]: I1124 08:17:58.582961 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4e65164c-c11a-4774-808c-f0dbdf7f9ffa","Type":"ContainerDied","Data":"0f46aa79573ce238fff8d4afd4fea2abc992000f6ccf57941b63b8e6e621f456"} Nov 24 08:17:58 crc kubenswrapper[4691]: I1124 08:17:58.617999 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.617978249 podStartE2EDuration="37.617978249s" podCreationTimestamp="2025-11-24 08:17:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:17:58.603988316 +0000 UTC m=+1240.602937585" watchObservedRunningTime="2025-11-24 08:17:58.617978249 +0000 UTC m=+1240.616927498" Nov 24 08:18:03 crc kubenswrapper[4691]: I1124 08:18:03.634204 4691 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4e65164c-c11a-4774-808c-f0dbdf7f9ffa","Type":"ContainerStarted","Data":"46c86d79005be5019f6e0f2af8c6b96ad5f707a14c87c625349f7cb45b8a6e3e"} Nov 24 08:18:03 crc kubenswrapper[4691]: I1124 08:18:03.634954 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:18:03 crc kubenswrapper[4691]: I1124 08:18:03.638556 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" event={"ID":"7b0cd66f-4531-45fd-aea8-00726f118662","Type":"ContainerStarted","Data":"60f1c3c35fa26a0a717f9313e1aae66d03a632e57c8e9c2a7417ea1e1428415b"} Nov 24 08:18:03 crc kubenswrapper[4691]: I1124 08:18:03.671735 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=41.671718978 podStartE2EDuration="41.671718978s" podCreationTimestamp="2025-11-24 08:17:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:18:03.666823547 +0000 UTC m=+1245.665772796" watchObservedRunningTime="2025-11-24 08:18:03.671718978 +0000 UTC m=+1245.670668227" Nov 24 08:18:03 crc kubenswrapper[4691]: I1124 08:18:03.685767 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" podStartSLOduration=2.2320336960000002 podStartE2EDuration="10.685742221s" podCreationTimestamp="2025-11-24 08:17:53 +0000 UTC" firstStartedPulling="2025-11-24 08:17:54.892226565 +0000 UTC m=+1236.891175824" lastFinishedPulling="2025-11-24 08:18:03.3459351 +0000 UTC m=+1245.344884349" observedRunningTime="2025-11-24 08:18:03.683160877 +0000 UTC m=+1245.682110146" watchObservedRunningTime="2025-11-24 08:18:03.685742221 +0000 UTC m=+1245.684691470" Nov 24 08:18:11 crc kubenswrapper[4691]: I1124 08:18:11.776693 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 24 08:18:15 crc kubenswrapper[4691]: I1124 08:18:15.774656 4691 generic.go:334] "Generic (PLEG): container finished" podID="7b0cd66f-4531-45fd-aea8-00726f118662" containerID="60f1c3c35fa26a0a717f9313e1aae66d03a632e57c8e9c2a7417ea1e1428415b" exitCode=0 Nov 24 08:18:15 crc kubenswrapper[4691]: I1124 08:18:15.774746 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" event={"ID":"7b0cd66f-4531-45fd-aea8-00726f118662","Type":"ContainerDied","Data":"60f1c3c35fa26a0a717f9313e1aae66d03a632e57c8e9c2a7417ea1e1428415b"} Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.219334 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.337563 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-ssh-key\") pod \"7b0cd66f-4531-45fd-aea8-00726f118662\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.338026 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-inventory\") pod \"7b0cd66f-4531-45fd-aea8-00726f118662\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.338276 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nv6c7\" (UniqueName: \"kubernetes.io/projected/7b0cd66f-4531-45fd-aea8-00726f118662-kube-api-access-nv6c7\") pod \"7b0cd66f-4531-45fd-aea8-00726f118662\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.338352 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-repo-setup-combined-ca-bundle\") pod \"7b0cd66f-4531-45fd-aea8-00726f118662\" (UID: \"7b0cd66f-4531-45fd-aea8-00726f118662\") " Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.344477 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b0cd66f-4531-45fd-aea8-00726f118662-kube-api-access-nv6c7" (OuterVolumeSpecName: "kube-api-access-nv6c7") pod "7b0cd66f-4531-45fd-aea8-00726f118662" (UID: "7b0cd66f-4531-45fd-aea8-00726f118662"). InnerVolumeSpecName "kube-api-access-nv6c7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.345551 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "7b0cd66f-4531-45fd-aea8-00726f118662" (UID: "7b0cd66f-4531-45fd-aea8-00726f118662"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.374535 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7b0cd66f-4531-45fd-aea8-00726f118662" (UID: "7b0cd66f-4531-45fd-aea8-00726f118662"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.385721 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-inventory" (OuterVolumeSpecName: "inventory") pod "7b0cd66f-4531-45fd-aea8-00726f118662" (UID: "7b0cd66f-4531-45fd-aea8-00726f118662"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.440935 4691 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.440970 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.440980 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7b0cd66f-4531-45fd-aea8-00726f118662-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.440989 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nv6c7\" (UniqueName: \"kubernetes.io/projected/7b0cd66f-4531-45fd-aea8-00726f118662-kube-api-access-nv6c7\") on node \"crc\" DevicePath \"\"" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.795927 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" event={"ID":"7b0cd66f-4531-45fd-aea8-00726f118662","Type":"ContainerDied","Data":"5fb8c0a9ad9417cdfe4e98f91a5e8233c00bbb027f474cc85c1f7d1e8074ea74"} Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.795974 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5fb8c0a9ad9417cdfe4e98f91a5e8233c00bbb027f474cc85c1f7d1e8074ea74" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.796041 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.895215 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx"] Nov 24 08:18:17 crc kubenswrapper[4691]: E1124 08:18:17.895657 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b0cd66f-4531-45fd-aea8-00726f118662" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.895676 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b0cd66f-4531-45fd-aea8-00726f118662" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.895863 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b0cd66f-4531-45fd-aea8-00726f118662" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.896698 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.898911 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.899311 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.899737 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.900222 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:18:17 crc kubenswrapper[4691]: I1124 08:18:17.917312 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx"] Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.055040 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ncxlx\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.055697 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ncxlx\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.056120 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-th6hg\" (UniqueName: \"kubernetes.io/projected/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-kube-api-access-th6hg\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ncxlx\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.159340 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ncxlx\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.159483 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ncxlx\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.159557 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-th6hg\" (UniqueName: \"kubernetes.io/projected/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-kube-api-access-th6hg\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ncxlx\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.173334 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ncxlx\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.174096 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ncxlx\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.176900 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-th6hg\" (UniqueName: \"kubernetes.io/projected/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-kube-api-access-th6hg\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ncxlx\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.216868 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.792788 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx"] Nov 24 08:18:18 crc kubenswrapper[4691]: I1124 08:18:18.811341 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" event={"ID":"4e07a8ba-4deb-45cb-8ecd-423300eadb7a","Type":"ContainerStarted","Data":"057e77c4ec7743b8ea6530a2cee19fdad6a5e9f4fc87875e9d2b7501fa96ace0"} Nov 24 08:18:19 crc kubenswrapper[4691]: I1124 08:18:19.380751 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:18:19 crc kubenswrapper[4691]: I1124 08:18:19.826381 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" event={"ID":"4e07a8ba-4deb-45cb-8ecd-423300eadb7a","Type":"ContainerStarted","Data":"84e13f4cc15e4f75b86fd0effd1041a96e30e7e42e28102e2b6803beca47e333"} Nov 24 08:18:19 crc kubenswrapper[4691]: I1124 08:18:19.847410 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" podStartSLOduration=2.255793228 podStartE2EDuration="2.847390677s" podCreationTimestamp="2025-11-24 08:18:17 +0000 UTC" firstStartedPulling="2025-11-24 08:18:18.786598073 +0000 UTC m=+1260.785547322" lastFinishedPulling="2025-11-24 08:18:19.378195532 +0000 UTC m=+1261.377144771" observedRunningTime="2025-11-24 08:18:19.841785376 +0000 UTC m=+1261.840734625" watchObservedRunningTime="2025-11-24 08:18:19.847390677 +0000 UTC m=+1261.846339926" Nov 24 08:18:22 crc kubenswrapper[4691]: I1124 08:18:22.540654 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 24 08:18:22 crc kubenswrapper[4691]: I1124 08:18:22.857032 4691 generic.go:334] "Generic (PLEG): container finished" podID="4e07a8ba-4deb-45cb-8ecd-423300eadb7a" 
containerID="84e13f4cc15e4f75b86fd0effd1041a96e30e7e42e28102e2b6803beca47e333" exitCode=0 Nov 24 08:18:22 crc kubenswrapper[4691]: I1124 08:18:22.857061 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" event={"ID":"4e07a8ba-4deb-45cb-8ecd-423300eadb7a","Type":"ContainerDied","Data":"84e13f4cc15e4f75b86fd0effd1041a96e30e7e42e28102e2b6803beca47e333"} Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.315440 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.411795 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-ssh-key\") pod \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.412290 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-th6hg\" (UniqueName: \"kubernetes.io/projected/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-kube-api-access-th6hg\") pod \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.412538 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-inventory\") pod \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\" (UID: \"4e07a8ba-4deb-45cb-8ecd-423300eadb7a\") " Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.417043 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-kube-api-access-th6hg" (OuterVolumeSpecName: "kube-api-access-th6hg") pod "4e07a8ba-4deb-45cb-8ecd-423300eadb7a" (UID: "4e07a8ba-4deb-45cb-8ecd-423300eadb7a"). InnerVolumeSpecName "kube-api-access-th6hg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.441437 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4e07a8ba-4deb-45cb-8ecd-423300eadb7a" (UID: "4e07a8ba-4deb-45cb-8ecd-423300eadb7a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.443903 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-inventory" (OuterVolumeSpecName: "inventory") pod "4e07a8ba-4deb-45cb-8ecd-423300eadb7a" (UID: "4e07a8ba-4deb-45cb-8ecd-423300eadb7a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.515075 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-th6hg\" (UniqueName: \"kubernetes.io/projected/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-kube-api-access-th6hg\") on node \"crc\" DevicePath \"\"" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.515107 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.515116 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4e07a8ba-4deb-45cb-8ecd-423300eadb7a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.886367 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" event={"ID":"4e07a8ba-4deb-45cb-8ecd-423300eadb7a","Type":"ContainerDied","Data":"057e77c4ec7743b8ea6530a2cee19fdad6a5e9f4fc87875e9d2b7501fa96ace0"} Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.886411 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="057e77c4ec7743b8ea6530a2cee19fdad6a5e9f4fc87875e9d2b7501fa96ace0" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.886517 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ncxlx" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.957350 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92"] Nov 24 08:18:24 crc kubenswrapper[4691]: E1124 08:18:24.957908 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e07a8ba-4deb-45cb-8ecd-423300eadb7a" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.957930 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e07a8ba-4deb-45cb-8ecd-423300eadb7a" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.958180 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e07a8ba-4deb-45cb-8ecd-423300eadb7a" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.959000 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.962693 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.962845 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.962988 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.963553 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:18:24 crc kubenswrapper[4691]: I1124 08:18:24.966426 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92"] Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.050075 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.050410 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4vvn\" (UniqueName: \"kubernetes.io/projected/6199a668-e1b5-473b-8ff0-2fdf26b69c79-kube-api-access-m4vvn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.050540 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.050869 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.152294 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4vvn\" (UniqueName: \"kubernetes.io/projected/6199a668-e1b5-473b-8ff0-2fdf26b69c79-kube-api-access-m4vvn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.152362 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-inventory\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.152389 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.152478 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.157345 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.157488 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.157715 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.171179 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4vvn\" (UniqueName: \"kubernetes.io/projected/6199a668-e1b5-473b-8ff0-2fdf26b69c79-kube-api-access-m4vvn\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.319674 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.842866 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92"] Nov 24 08:18:25 crc kubenswrapper[4691]: W1124 08:18:25.847397 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6199a668_e1b5_473b_8ff0_2fdf26b69c79.slice/crio-1a451c1f33c1dd9d9437492cec63cf15e4516dc3799f9fb01e6ddb402845d437 WatchSource:0}: Error finding container 1a451c1f33c1dd9d9437492cec63cf15e4516dc3799f9fb01e6ddb402845d437: Status 404 returned error can't find the container with id 1a451c1f33c1dd9d9437492cec63cf15e4516dc3799f9fb01e6ddb402845d437 Nov 24 08:18:25 crc kubenswrapper[4691]: I1124 08:18:25.898029 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" event={"ID":"6199a668-e1b5-473b-8ff0-2fdf26b69c79","Type":"ContainerStarted","Data":"1a451c1f33c1dd9d9437492cec63cf15e4516dc3799f9fb01e6ddb402845d437"} Nov 24 08:18:27 crc kubenswrapper[4691]: I1124 08:18:27.920787 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" event={"ID":"6199a668-e1b5-473b-8ff0-2fdf26b69c79","Type":"ContainerStarted","Data":"d51d8c7102f68d2c6169e214941d5e94df0c2f650e442d443a45001a935ced49"} Nov 24 08:18:27 crc kubenswrapper[4691]: I1124 08:18:27.957435 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" podStartSLOduration=3.068014058 podStartE2EDuration="3.95741715s" podCreationTimestamp="2025-11-24 08:18:24 +0000 UTC" firstStartedPulling="2025-11-24 08:18:25.851189133 +0000 UTC m=+1267.850138382" lastFinishedPulling="2025-11-24 08:18:26.740592215 +0000 UTC m=+1268.739541474" observedRunningTime="2025-11-24 08:18:27.949889853 +0000 UTC m=+1269.948839122" watchObservedRunningTime="2025-11-24 08:18:27.95741715 +0000 UTC m=+1269.956366399" Nov 24 08:18:51 crc kubenswrapper[4691]: I1124 08:18:51.089517 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:18:51 crc kubenswrapper[4691]: I1124 08:18:51.090188 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:19:20 crc kubenswrapper[4691]: I1124 08:19:20.958875 4691 scope.go:117] "RemoveContainer" containerID="eccd8ce2de7bf41e77a7fbe034d937c8020882af501290d4e72451ca8383d5f1" Nov 24 08:19:21 crc kubenswrapper[4691]: I1124 08:19:21.002731 4691 scope.go:117] "RemoveContainer" containerID="cf4cee1eb31a43ad3f71bc69a62e364c38c702de3457ba5dde1baddd7638adf8" Nov 24 08:19:21 crc kubenswrapper[4691]: I1124 08:19:21.036350 4691 scope.go:117] "RemoveContainer" containerID="ff17ebc5cb1c438b7cad9cf02c3461cbc5711563a201dba53ed7b11d80200ef4" Nov 24 08:19:21 crc kubenswrapper[4691]: I1124 08:19:21.089490 4691 patch_prober.go:28] interesting 
pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:19:21 crc kubenswrapper[4691]: I1124 08:19:21.089558 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:19:51 crc kubenswrapper[4691]: I1124 08:19:51.089145 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:19:51 crc kubenswrapper[4691]: I1124 08:19:51.089858 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:19:51 crc kubenswrapper[4691]: I1124 08:19:51.089919 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:19:51 crc kubenswrapper[4691]: I1124 08:19:51.090904 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e9457e7b6c4c145ad63f8bf4661b5a86c1fa3b74970f7b54d789669ee7203629"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 08:19:51 crc kubenswrapper[4691]: I1124 08:19:51.090977 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://e9457e7b6c4c145ad63f8bf4661b5a86c1fa3b74970f7b54d789669ee7203629" gracePeriod=600 Nov 24 08:19:51 crc kubenswrapper[4691]: I1124 08:19:51.796079 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="e9457e7b6c4c145ad63f8bf4661b5a86c1fa3b74970f7b54d789669ee7203629" exitCode=0 Nov 24 08:19:51 crc kubenswrapper[4691]: I1124 08:19:51.796146 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"e9457e7b6c4c145ad63f8bf4661b5a86c1fa3b74970f7b54d789669ee7203629"} Nov 24 08:19:51 crc kubenswrapper[4691]: I1124 08:19:51.797248 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf"} Nov 24 08:19:51 crc kubenswrapper[4691]: I1124 08:19:51.797285 4691 scope.go:117] "RemoveContainer" containerID="914d4816a735e64c132874c27ca7b7bbe33f77f07f7911089de6ac4d29c8f36b" Nov 24 08:21:31 
crc kubenswrapper[4691]: I1124 08:21:31.845157 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l9px7"] Nov 24 08:21:31 crc kubenswrapper[4691]: I1124 08:21:31.848305 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:31 crc kubenswrapper[4691]: I1124 08:21:31.890513 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l9px7"] Nov 24 08:21:31 crc kubenswrapper[4691]: I1124 08:21:31.927951 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-utilities\") pod \"certified-operators-l9px7\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:31 crc kubenswrapper[4691]: I1124 08:21:31.928105 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-catalog-content\") pod \"certified-operators-l9px7\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:31 crc kubenswrapper[4691]: I1124 08:21:31.928192 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ddrt\" (UniqueName: \"kubernetes.io/projected/6c439899-210e-48b9-8f47-31fdc17f96f2-kube-api-access-4ddrt\") pod \"certified-operators-l9px7\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:32 crc kubenswrapper[4691]: I1124 08:21:32.030260 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-utilities\") pod \"certified-operators-l9px7\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:32 crc kubenswrapper[4691]: I1124 08:21:32.030657 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-catalog-content\") pod \"certified-operators-l9px7\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:32 crc kubenswrapper[4691]: I1124 08:21:32.030732 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ddrt\" (UniqueName: \"kubernetes.io/projected/6c439899-210e-48b9-8f47-31fdc17f96f2-kube-api-access-4ddrt\") pod \"certified-operators-l9px7\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:32 crc kubenswrapper[4691]: I1124 08:21:32.030836 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-utilities\") pod \"certified-operators-l9px7\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:32 crc kubenswrapper[4691]: I1124 08:21:32.031169 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-catalog-content\") pod \"certified-operators-l9px7\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:32 crc kubenswrapper[4691]: I1124 08:21:32.057953 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ddrt\" (UniqueName: \"kubernetes.io/projected/6c439899-210e-48b9-8f47-31fdc17f96f2-kube-api-access-4ddrt\") pod \"certified-operators-l9px7\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:32 crc kubenswrapper[4691]: I1124 08:21:32.180505 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:32 crc kubenswrapper[4691]: I1124 08:21:32.719968 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l9px7"] Nov 24 08:21:32 crc kubenswrapper[4691]: I1124 08:21:32.965127 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l9px7" event={"ID":"6c439899-210e-48b9-8f47-31fdc17f96f2","Type":"ContainerStarted","Data":"53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88"} Nov 24 08:21:32 crc kubenswrapper[4691]: I1124 08:21:32.965506 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l9px7" event={"ID":"6c439899-210e-48b9-8f47-31fdc17f96f2","Type":"ContainerStarted","Data":"ea2177d6a59ac9b7b69b2dd107bdd6b84a9a08814341918036fa6f982f6507e5"} Nov 24 08:21:33 crc kubenswrapper[4691]: I1124 08:21:33.976735 4691 generic.go:334] "Generic (PLEG): container finished" podID="6c439899-210e-48b9-8f47-31fdc17f96f2" containerID="53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88" exitCode=0 Nov 24 08:21:33 crc kubenswrapper[4691]: I1124 08:21:33.976956 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l9px7" event={"ID":"6c439899-210e-48b9-8f47-31fdc17f96f2","Type":"ContainerDied","Data":"53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88"} Nov 24 08:21:34 crc kubenswrapper[4691]: I1124 08:21:34.992995 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l9px7" event={"ID":"6c439899-210e-48b9-8f47-31fdc17f96f2","Type":"ContainerStarted","Data":"341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555"} Nov 24 08:21:34 crc kubenswrapper[4691]: I1124 08:21:34.995594 4691 generic.go:334] "Generic (PLEG): container finished" podID="6199a668-e1b5-473b-8ff0-2fdf26b69c79" containerID="d51d8c7102f68d2c6169e214941d5e94df0c2f650e442d443a45001a935ced49" exitCode=0 Nov 24 08:21:34 crc kubenswrapper[4691]: I1124 08:21:34.995636 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" event={"ID":"6199a668-e1b5-473b-8ff0-2fdf26b69c79","Type":"ContainerDied","Data":"d51d8c7102f68d2c6169e214941d5e94df0c2f650e442d443a45001a935ced49"} Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.008678 4691 generic.go:334] "Generic (PLEG): container finished" podID="6c439899-210e-48b9-8f47-31fdc17f96f2" containerID="341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555" exitCode=0 Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.008770 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l9px7" 
event={"ID":"6c439899-210e-48b9-8f47-31fdc17f96f2","Type":"ContainerDied","Data":"341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555"} Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.575662 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.656683 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4vvn\" (UniqueName: \"kubernetes.io/projected/6199a668-e1b5-473b-8ff0-2fdf26b69c79-kube-api-access-m4vvn\") pod \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.656806 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-bootstrap-combined-ca-bundle\") pod \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.656845 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-inventory\") pod \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.656977 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-ssh-key\") pod \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\" (UID: \"6199a668-e1b5-473b-8ff0-2fdf26b69c79\") " Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.664101 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6199a668-e1b5-473b-8ff0-2fdf26b69c79-kube-api-access-m4vvn" (OuterVolumeSpecName: "kube-api-access-m4vvn") pod "6199a668-e1b5-473b-8ff0-2fdf26b69c79" (UID: "6199a668-e1b5-473b-8ff0-2fdf26b69c79"). InnerVolumeSpecName "kube-api-access-m4vvn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.670648 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "6199a668-e1b5-473b-8ff0-2fdf26b69c79" (UID: "6199a668-e1b5-473b-8ff0-2fdf26b69c79"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.693837 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6199a668-e1b5-473b-8ff0-2fdf26b69c79" (UID: "6199a668-e1b5-473b-8ff0-2fdf26b69c79"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.700704 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-inventory" (OuterVolumeSpecName: "inventory") pod "6199a668-e1b5-473b-8ff0-2fdf26b69c79" (UID: "6199a668-e1b5-473b-8ff0-2fdf26b69c79"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.779274 4691 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.779627 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.779644 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6199a668-e1b5-473b-8ff0-2fdf26b69c79-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:21:36 crc kubenswrapper[4691]: I1124 08:21:36.779658 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4vvn\" (UniqueName: \"kubernetes.io/projected/6199a668-e1b5-473b-8ff0-2fdf26b69c79-kube-api-access-m4vvn\") on node \"crc\" DevicePath \"\"" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.022782 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l9px7" event={"ID":"6c439899-210e-48b9-8f47-31fdc17f96f2","Type":"ContainerStarted","Data":"474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548"} Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.024583 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" event={"ID":"6199a668-e1b5-473b-8ff0-2fdf26b69c79","Type":"ContainerDied","Data":"1a451c1f33c1dd9d9437492cec63cf15e4516dc3799f9fb01e6ddb402845d437"} Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.024638 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a451c1f33c1dd9d9437492cec63cf15e4516dc3799f9fb01e6ddb402845d437" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.024644 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.055115 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l9px7" podStartSLOduration=3.592095705 podStartE2EDuration="6.055097651s" podCreationTimestamp="2025-11-24 08:21:31 +0000 UTC" firstStartedPulling="2025-11-24 08:21:33.979587249 +0000 UTC m=+1455.978536488" lastFinishedPulling="2025-11-24 08:21:36.442589165 +0000 UTC m=+1458.441538434" observedRunningTime="2025-11-24 08:21:37.050909041 +0000 UTC m=+1459.049858290" watchObservedRunningTime="2025-11-24 08:21:37.055097651 +0000 UTC m=+1459.054046900" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.115094 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5"] Nov 24 08:21:37 crc kubenswrapper[4691]: E1124 08:21:37.115585 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6199a668-e1b5-473b-8ff0-2fdf26b69c79" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.115609 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6199a668-e1b5-473b-8ff0-2fdf26b69c79" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.115885 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="6199a668-e1b5-473b-8ff0-2fdf26b69c79" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.116682 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.118526 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.119120 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.119222 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.120771 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.126335 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5"] Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.288743 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-svnp5\" (UID: \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.288944 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbf2z\" (UniqueName: \"kubernetes.io/projected/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-kube-api-access-dbf2z\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-svnp5\" (UID: 
\"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.289014 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-svnp5\" (UID: \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.391396 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-svnp5\" (UID: \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.391652 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbf2z\" (UniqueName: \"kubernetes.io/projected/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-kube-api-access-dbf2z\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-svnp5\" (UID: \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.391783 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-svnp5\" (UID: \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.397310 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-svnp5\" (UID: \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.400011 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-svnp5\" (UID: \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.411890 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbf2z\" (UniqueName: \"kubernetes.io/projected/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-kube-api-access-dbf2z\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-svnp5\" (UID: \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:21:37 crc kubenswrapper[4691]: I1124 08:21:37.438405 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:21:38 crc kubenswrapper[4691]: I1124 08:21:38.055084 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5"] Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.063465 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" event={"ID":"460ba73d-0917-4b4c-8ca1-141a72e6b3e4","Type":"ContainerStarted","Data":"3a02abe3e32e6808830813b17b047c89514a793f5176904eb578f5d580c8c9a1"} Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.063795 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" event={"ID":"460ba73d-0917-4b4c-8ca1-141a72e6b3e4","Type":"ContainerStarted","Data":"d06af14aa1436169f0b8568b96140ba93c4395d7ca1fd422447061db3c5a4a3d"} Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.082713 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" podStartSLOduration=1.6443428070000001 podStartE2EDuration="2.082691899s" podCreationTimestamp="2025-11-24 08:21:37 +0000 UTC" firstStartedPulling="2025-11-24 08:21:38.046819171 +0000 UTC m=+1460.045768420" lastFinishedPulling="2025-11-24 08:21:38.485168263 +0000 UTC m=+1460.484117512" observedRunningTime="2025-11-24 08:21:39.081820984 +0000 UTC m=+1461.080770313" watchObservedRunningTime="2025-11-24 08:21:39.082691899 +0000 UTC m=+1461.081641158" Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.250687 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hjmjm"] Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.253116 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.264656 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hjmjm"] Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.439931 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-catalog-content\") pod \"community-operators-hjmjm\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.439999 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-utilities\") pod \"community-operators-hjmjm\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.440181 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x26vs\" (UniqueName: \"kubernetes.io/projected/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-kube-api-access-x26vs\") pod \"community-operators-hjmjm\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.541601 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-catalog-content\") pod \"community-operators-hjmjm\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.541659 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-utilities\") pod \"community-operators-hjmjm\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.541742 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x26vs\" (UniqueName: \"kubernetes.io/projected/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-kube-api-access-x26vs\") pod \"community-operators-hjmjm\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.542347 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-catalog-content\") pod \"community-operators-hjmjm\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.542516 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-utilities\") pod \"community-operators-hjmjm\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.564607 4691 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-x26vs\" (UniqueName: \"kubernetes.io/projected/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-kube-api-access-x26vs\") pod \"community-operators-hjmjm\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:39 crc kubenswrapper[4691]: I1124 08:21:39.585141 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:40 crc kubenswrapper[4691]: I1124 08:21:40.196735 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hjmjm"] Nov 24 08:21:41 crc kubenswrapper[4691]: I1124 08:21:41.090512 4691 generic.go:334] "Generic (PLEG): container finished" podID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" containerID="4fdf1c91163776884fb814087e15db6950aa7e605fd7d9ed20c217926ef8b5a6" exitCode=0 Nov 24 08:21:41 crc kubenswrapper[4691]: I1124 08:21:41.090582 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjmjm" event={"ID":"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9","Type":"ContainerDied","Data":"4fdf1c91163776884fb814087e15db6950aa7e605fd7d9ed20c217926ef8b5a6"} Nov 24 08:21:41 crc kubenswrapper[4691]: I1124 08:21:41.090627 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjmjm" event={"ID":"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9","Type":"ContainerStarted","Data":"4951f44ede49334b677f1a28a21030880db96ac6378c589488510b4a5f1b6c3e"} Nov 24 08:21:42 crc kubenswrapper[4691]: I1124 08:21:42.100399 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjmjm" event={"ID":"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9","Type":"ContainerStarted","Data":"14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed"} Nov 24 08:21:42 crc kubenswrapper[4691]: I1124 08:21:42.180639 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:42 crc kubenswrapper[4691]: I1124 08:21:42.180961 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:42 crc kubenswrapper[4691]: I1124 08:21:42.251802 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:43 crc kubenswrapper[4691]: I1124 08:21:43.114313 4691 generic.go:334] "Generic (PLEG): container finished" podID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" containerID="14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed" exitCode=0 Nov 24 08:21:43 crc kubenswrapper[4691]: I1124 08:21:43.114389 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjmjm" event={"ID":"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9","Type":"ContainerDied","Data":"14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed"} Nov 24 08:21:43 crc kubenswrapper[4691]: I1124 08:21:43.180086 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:44 crc kubenswrapper[4691]: I1124 08:21:44.135653 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjmjm" 
event={"ID":"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9","Type":"ContainerStarted","Data":"509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9"} Nov 24 08:21:44 crc kubenswrapper[4691]: I1124 08:21:44.614334 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hjmjm" podStartSLOduration=3.133232564 podStartE2EDuration="5.614285007s" podCreationTimestamp="2025-11-24 08:21:39 +0000 UTC" firstStartedPulling="2025-11-24 08:21:41.095141613 +0000 UTC m=+1463.094090852" lastFinishedPulling="2025-11-24 08:21:43.576194046 +0000 UTC m=+1465.575143295" observedRunningTime="2025-11-24 08:21:44.161719419 +0000 UTC m=+1466.160668678" watchObservedRunningTime="2025-11-24 08:21:44.614285007 +0000 UTC m=+1466.613234276" Nov 24 08:21:44 crc kubenswrapper[4691]: I1124 08:21:44.616440 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l9px7"] Nov 24 08:21:46 crc kubenswrapper[4691]: I1124 08:21:46.151405 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l9px7" podUID="6c439899-210e-48b9-8f47-31fdc17f96f2" containerName="registry-server" containerID="cri-o://474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548" gracePeriod=2 Nov 24 08:21:46 crc kubenswrapper[4691]: I1124 08:21:46.870947 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:46 crc kubenswrapper[4691]: I1124 08:21:46.891597 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-utilities\") pod \"6c439899-210e-48b9-8f47-31fdc17f96f2\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " Nov 24 08:21:46 crc kubenswrapper[4691]: I1124 08:21:46.891816 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ddrt\" (UniqueName: \"kubernetes.io/projected/6c439899-210e-48b9-8f47-31fdc17f96f2-kube-api-access-4ddrt\") pod \"6c439899-210e-48b9-8f47-31fdc17f96f2\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " Nov 24 08:21:46 crc kubenswrapper[4691]: I1124 08:21:46.891878 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-catalog-content\") pod \"6c439899-210e-48b9-8f47-31fdc17f96f2\" (UID: \"6c439899-210e-48b9-8f47-31fdc17f96f2\") " Nov 24 08:21:46 crc kubenswrapper[4691]: I1124 08:21:46.895424 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-utilities" (OuterVolumeSpecName: "utilities") pod "6c439899-210e-48b9-8f47-31fdc17f96f2" (UID: "6c439899-210e-48b9-8f47-31fdc17f96f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:21:46 crc kubenswrapper[4691]: I1124 08:21:46.900621 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c439899-210e-48b9-8f47-31fdc17f96f2-kube-api-access-4ddrt" (OuterVolumeSpecName: "kube-api-access-4ddrt") pod "6c439899-210e-48b9-8f47-31fdc17f96f2" (UID: "6c439899-210e-48b9-8f47-31fdc17f96f2"). InnerVolumeSpecName "kube-api-access-4ddrt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:21:46 crc kubenswrapper[4691]: I1124 08:21:46.953394 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6c439899-210e-48b9-8f47-31fdc17f96f2" (UID: "6c439899-210e-48b9-8f47-31fdc17f96f2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:21:46 crc kubenswrapper[4691]: I1124 08:21:46.993824 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ddrt\" (UniqueName: \"kubernetes.io/projected/6c439899-210e-48b9-8f47-31fdc17f96f2-kube-api-access-4ddrt\") on node \"crc\" DevicePath \"\"" Nov 24 08:21:46 crc kubenswrapper[4691]: I1124 08:21:46.993877 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:21:46 crc kubenswrapper[4691]: I1124 08:21:46.993893 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c439899-210e-48b9-8f47-31fdc17f96f2-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.163567 4691 generic.go:334] "Generic (PLEG): container finished" podID="6c439899-210e-48b9-8f47-31fdc17f96f2" containerID="474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548" exitCode=0 Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.163615 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l9px7" event={"ID":"6c439899-210e-48b9-8f47-31fdc17f96f2","Type":"ContainerDied","Data":"474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548"} Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.163659 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l9px7" event={"ID":"6c439899-210e-48b9-8f47-31fdc17f96f2","Type":"ContainerDied","Data":"ea2177d6a59ac9b7b69b2dd107bdd6b84a9a08814341918036fa6f982f6507e5"} Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.163682 4691 scope.go:117] "RemoveContainer" containerID="474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548" Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.163731 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l9px7" Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.201624 4691 scope.go:117] "RemoveContainer" containerID="341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555" Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.219760 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l9px7"] Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.232874 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l9px7"] Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.246847 4691 scope.go:117] "RemoveContainer" containerID="53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88" Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.319785 4691 scope.go:117] "RemoveContainer" containerID="474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548" Nov 24 08:21:47 crc kubenswrapper[4691]: E1124 08:21:47.320629 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548\": container with ID starting with 474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548 not found: ID does not exist" containerID="474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548" Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.320664 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548"} err="failed to get container status \"474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548\": rpc error: code = NotFound desc = could not find container \"474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548\": container with ID starting with 474f8b23afb4fe08d18d46d7831820b24cae9b430facb18dcdd278ba4a178548 not found: ID does not exist" Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.320685 4691 scope.go:117] "RemoveContainer" containerID="341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555" Nov 24 08:21:47 crc kubenswrapper[4691]: E1124 08:21:47.321232 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555\": container with ID starting with 341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555 not found: ID does not exist" containerID="341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555" Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.321338 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555"} err="failed to get container status \"341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555\": rpc error: code = NotFound desc = could not find container \"341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555\": container with ID starting with 341c1936e330f828030c669721470a1b4f32ae40b8dcac77ca50332e1620a555 not found: ID does not exist" Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.321485 4691 scope.go:117] "RemoveContainer" containerID="53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88" Nov 24 08:21:47 crc kubenswrapper[4691]: E1124 08:21:47.321997 4691 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88\": container with ID starting with 53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88 not found: ID does not exist" containerID="53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88" Nov 24 08:21:47 crc kubenswrapper[4691]: I1124 08:21:47.322026 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88"} err="failed to get container status \"53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88\": rpc error: code = NotFound desc = could not find container \"53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88\": container with ID starting with 53143265373ebc4c9a32314d5e791bf4df320cdd0c544cfe6d6f7c96c6342b88 not found: ID does not exist" Nov 24 08:21:48 crc kubenswrapper[4691]: I1124 08:21:48.779227 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c439899-210e-48b9-8f47-31fdc17f96f2" path="/var/lib/kubelet/pods/6c439899-210e-48b9-8f47-31fdc17f96f2/volumes" Nov 24 08:21:49 crc kubenswrapper[4691]: I1124 08:21:49.585839 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:49 crc kubenswrapper[4691]: I1124 08:21:49.586463 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:49 crc kubenswrapper[4691]: I1124 08:21:49.650553 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:50 crc kubenswrapper[4691]: I1124 08:21:50.258576 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:50 crc kubenswrapper[4691]: I1124 08:21:50.822982 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hjmjm"] Nov 24 08:21:51 crc kubenswrapper[4691]: I1124 08:21:51.089316 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:21:51 crc kubenswrapper[4691]: I1124 08:21:51.089688 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:21:52 crc kubenswrapper[4691]: I1124 08:21:52.225318 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hjmjm" podUID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" containerName="registry-server" containerID="cri-o://509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9" gracePeriod=2 Nov 24 08:21:52 crc kubenswrapper[4691]: I1124 08:21:52.684593 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:52 crc kubenswrapper[4691]: I1124 08:21:52.834641 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x26vs\" (UniqueName: \"kubernetes.io/projected/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-kube-api-access-x26vs\") pod \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " Nov 24 08:21:52 crc kubenswrapper[4691]: I1124 08:21:52.834746 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-utilities\") pod \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " Nov 24 08:21:52 crc kubenswrapper[4691]: I1124 08:21:52.834830 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-catalog-content\") pod \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\" (UID: \"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9\") " Nov 24 08:21:52 crc kubenswrapper[4691]: I1124 08:21:52.836367 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-utilities" (OuterVolumeSpecName: "utilities") pod "eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" (UID: "eb1e2157-b7e9-41d3-8ba2-2b3b287452a9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:21:52 crc kubenswrapper[4691]: I1124 08:21:52.842862 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-kube-api-access-x26vs" (OuterVolumeSpecName: "kube-api-access-x26vs") pod "eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" (UID: "eb1e2157-b7e9-41d3-8ba2-2b3b287452a9"). InnerVolumeSpecName "kube-api-access-x26vs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:21:52 crc kubenswrapper[4691]: I1124 08:21:52.889631 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" (UID: "eb1e2157-b7e9-41d3-8ba2-2b3b287452a9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:21:52 crc kubenswrapper[4691]: I1124 08:21:52.938339 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x26vs\" (UniqueName: \"kubernetes.io/projected/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-kube-api-access-x26vs\") on node \"crc\" DevicePath \"\"" Nov 24 08:21:52 crc kubenswrapper[4691]: I1124 08:21:52.938397 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:21:52 crc kubenswrapper[4691]: I1124 08:21:52.938410 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.236608 4691 generic.go:334] "Generic (PLEG): container finished" podID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" containerID="509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9" exitCode=0 Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.236680 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjmjm" event={"ID":"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9","Type":"ContainerDied","Data":"509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9"} Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.236701 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hjmjm" Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.236736 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjmjm" event={"ID":"eb1e2157-b7e9-41d3-8ba2-2b3b287452a9","Type":"ContainerDied","Data":"4951f44ede49334b677f1a28a21030880db96ac6378c589488510b4a5f1b6c3e"} Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.236758 4691 scope.go:117] "RemoveContainer" containerID="509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9" Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.272473 4691 scope.go:117] "RemoveContainer" containerID="14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed" Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.276809 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hjmjm"] Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.287349 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hjmjm"] Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.292334 4691 scope.go:117] "RemoveContainer" containerID="4fdf1c91163776884fb814087e15db6950aa7e605fd7d9ed20c217926ef8b5a6" Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.353989 4691 scope.go:117] "RemoveContainer" containerID="509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9" Nov 24 08:21:53 crc kubenswrapper[4691]: E1124 08:21:53.354864 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9\": container with ID starting with 509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9 not found: ID does not exist" containerID="509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9" Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.354919 
4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9"} err="failed to get container status \"509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9\": rpc error: code = NotFound desc = could not find container \"509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9\": container with ID starting with 509cbedaa48beb700639207f44da547a842483a773f4d7eb7336914a3e444af9 not found: ID does not exist" Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.354949 4691 scope.go:117] "RemoveContainer" containerID="14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed" Nov 24 08:21:53 crc kubenswrapper[4691]: E1124 08:21:53.355509 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed\": container with ID starting with 14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed not found: ID does not exist" containerID="14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed" Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.355580 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed"} err="failed to get container status \"14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed\": rpc error: code = NotFound desc = could not find container \"14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed\": container with ID starting with 14530cffb2900b77c58d7c4a7cdb9fd18084e290f3e553fbdbdc2229a4b651ed not found: ID does not exist" Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.355634 4691 scope.go:117] "RemoveContainer" containerID="4fdf1c91163776884fb814087e15db6950aa7e605fd7d9ed20c217926ef8b5a6" Nov 24 08:21:53 crc kubenswrapper[4691]: E1124 08:21:53.356007 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fdf1c91163776884fb814087e15db6950aa7e605fd7d9ed20c217926ef8b5a6\": container with ID starting with 4fdf1c91163776884fb814087e15db6950aa7e605fd7d9ed20c217926ef8b5a6 not found: ID does not exist" containerID="4fdf1c91163776884fb814087e15db6950aa7e605fd7d9ed20c217926ef8b5a6" Nov 24 08:21:53 crc kubenswrapper[4691]: I1124 08:21:53.356037 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fdf1c91163776884fb814087e15db6950aa7e605fd7d9ed20c217926ef8b5a6"} err="failed to get container status \"4fdf1c91163776884fb814087e15db6950aa7e605fd7d9ed20c217926ef8b5a6\": rpc error: code = NotFound desc = could not find container \"4fdf1c91163776884fb814087e15db6950aa7e605fd7d9ed20c217926ef8b5a6\": container with ID starting with 4fdf1c91163776884fb814087e15db6950aa7e605fd7d9ed20c217926ef8b5a6 not found: ID does not exist" Nov 24 08:21:54 crc kubenswrapper[4691]: I1124 08:21:54.772314 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" path="/var/lib/kubelet/pods/eb1e2157-b7e9-41d3-8ba2-2b3b287452a9/volumes" Nov 24 08:22:21 crc kubenswrapper[4691]: I1124 08:22:21.089823 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:22:21 crc kubenswrapper[4691]: I1124 08:22:21.090896 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:22:21 crc kubenswrapper[4691]: I1124 08:22:21.193746 4691 scope.go:117] "RemoveContainer" containerID="edd7a88c4e0f7e64512fe3a49c370291936f7b49577ab2ac28edbedc6a17ae88" Nov 24 08:22:21 crc kubenswrapper[4691]: I1124 08:22:21.222665 4691 scope.go:117] "RemoveContainer" containerID="7ba747b7cfed0b02381efd4adfc3bf0b4cf1cf8c47ae0f637e1ff446cff4054e" Nov 24 08:22:29 crc kubenswrapper[4691]: I1124 08:22:29.059025 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-0a0f-account-create-trpcp"] Nov 24 08:22:29 crc kubenswrapper[4691]: I1124 08:22:29.068901 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-mjqw5"] Nov 24 08:22:29 crc kubenswrapper[4691]: I1124 08:22:29.082245 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-7539-account-create-wnbqm"] Nov 24 08:22:29 crc kubenswrapper[4691]: I1124 08:22:29.091329 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-0a0f-account-create-trpcp"] Nov 24 08:22:29 crc kubenswrapper[4691]: I1124 08:22:29.108889 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-mjqw5"] Nov 24 08:22:29 crc kubenswrapper[4691]: I1124 08:22:29.131818 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-7539-account-create-wnbqm"] Nov 24 08:22:30 crc kubenswrapper[4691]: I1124 08:22:30.044226 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-q7ttd"] Nov 24 08:22:30 crc kubenswrapper[4691]: I1124 08:22:30.060027 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-5559c"] Nov 24 08:22:30 crc kubenswrapper[4691]: I1124 08:22:30.073521 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-b277-account-create-bksjl"] Nov 24 08:22:30 crc kubenswrapper[4691]: I1124 08:22:30.081735 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-q7ttd"] Nov 24 08:22:30 crc kubenswrapper[4691]: I1124 08:22:30.089107 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-5559c"] Nov 24 08:22:30 crc kubenswrapper[4691]: I1124 08:22:30.095904 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-b277-account-create-bksjl"] Nov 24 08:22:30 crc kubenswrapper[4691]: I1124 08:22:30.772690 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0532c6d6-54c8-4920-856d-049cbc33863f" path="/var/lib/kubelet/pods/0532c6d6-54c8-4920-856d-049cbc33863f/volumes" Nov 24 08:22:30 crc kubenswrapper[4691]: I1124 08:22:30.773335 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="070a2b47-984b-4039-b05b-1953eec94bad" path="/var/lib/kubelet/pods/070a2b47-984b-4039-b05b-1953eec94bad/volumes" Nov 24 08:22:30 crc kubenswrapper[4691]: I1124 08:22:30.774004 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cae1b4e-964b-4f74-8ace-ec10292243fb" path="/var/lib/kubelet/pods/6cae1b4e-964b-4f74-8ace-ec10292243fb/volumes" Nov 24 08:22:30 
crc kubenswrapper[4691]: I1124 08:22:30.774672 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74a59460-c08c-46eb-97c1-07609f197dee" path="/var/lib/kubelet/pods/74a59460-c08c-46eb-97c1-07609f197dee/volumes" Nov 24 08:22:30 crc kubenswrapper[4691]: I1124 08:22:30.775897 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8982bed2-9351-43d6-964d-85d5aa0003a7" path="/var/lib/kubelet/pods/8982bed2-9351-43d6-964d-85d5aa0003a7/volumes" Nov 24 08:22:30 crc kubenswrapper[4691]: I1124 08:22:30.776559 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0b5edb1-0fd7-4165-9644-30ec96e3bf88" path="/var/lib/kubelet/pods/c0b5edb1-0fd7-4165-9644-30ec96e3bf88/volumes" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.244579 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hgt8c"] Nov 24 08:22:47 crc kubenswrapper[4691]: E1124 08:22:47.245561 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c439899-210e-48b9-8f47-31fdc17f96f2" containerName="extract-content" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.245579 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c439899-210e-48b9-8f47-31fdc17f96f2" containerName="extract-content" Nov 24 08:22:47 crc kubenswrapper[4691]: E1124 08:22:47.245600 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" containerName="registry-server" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.245606 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" containerName="registry-server" Nov 24 08:22:47 crc kubenswrapper[4691]: E1124 08:22:47.245626 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" containerName="extract-utilities" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.245634 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" containerName="extract-utilities" Nov 24 08:22:47 crc kubenswrapper[4691]: E1124 08:22:47.245642 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" containerName="extract-content" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.245649 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" containerName="extract-content" Nov 24 08:22:47 crc kubenswrapper[4691]: E1124 08:22:47.245668 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c439899-210e-48b9-8f47-31fdc17f96f2" containerName="registry-server" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.245674 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c439899-210e-48b9-8f47-31fdc17f96f2" containerName="registry-server" Nov 24 08:22:47 crc kubenswrapper[4691]: E1124 08:22:47.245684 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c439899-210e-48b9-8f47-31fdc17f96f2" containerName="extract-utilities" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.245690 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c439899-210e-48b9-8f47-31fdc17f96f2" containerName="extract-utilities" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.245856 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c439899-210e-48b9-8f47-31fdc17f96f2" containerName="registry-server" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 
08:22:47.245875 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb1e2157-b7e9-41d3-8ba2-2b3b287452a9" containerName="registry-server" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.247402 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.320771 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hgt8c"] Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.355213 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-catalog-content\") pod \"redhat-marketplace-hgt8c\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.355300 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmrzx\" (UniqueName: \"kubernetes.io/projected/6b572769-5de8-4008-8e9a-5b051438b6b4-kube-api-access-bmrzx\") pod \"redhat-marketplace-hgt8c\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.355551 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-utilities\") pod \"redhat-marketplace-hgt8c\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.457025 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-utilities\") pod \"redhat-marketplace-hgt8c\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.457209 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-catalog-content\") pod \"redhat-marketplace-hgt8c\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.457251 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmrzx\" (UniqueName: \"kubernetes.io/projected/6b572769-5de8-4008-8e9a-5b051438b6b4-kube-api-access-bmrzx\") pod \"redhat-marketplace-hgt8c\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.457691 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-utilities\") pod \"redhat-marketplace-hgt8c\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.457735 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-catalog-content\") pod \"redhat-marketplace-hgt8c\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.480698 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmrzx\" (UniqueName: \"kubernetes.io/projected/6b572769-5de8-4008-8e9a-5b051438b6b4-kube-api-access-bmrzx\") pod \"redhat-marketplace-hgt8c\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:47 crc kubenswrapper[4691]: I1124 08:22:47.635711 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:48 crc kubenswrapper[4691]: I1124 08:22:48.095503 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hgt8c"] Nov 24 08:22:48 crc kubenswrapper[4691]: I1124 08:22:48.193337 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hgt8c" event={"ID":"6b572769-5de8-4008-8e9a-5b051438b6b4","Type":"ContainerStarted","Data":"e5911d0f814b934e9935da5fef8fe18026308af03f90c455251ff05310a36c3f"} Nov 24 08:22:49 crc kubenswrapper[4691]: I1124 08:22:49.209339 4691 generic.go:334] "Generic (PLEG): container finished" podID="6b572769-5de8-4008-8e9a-5b051438b6b4" containerID="28c70d8cfdd7ff92d8f1f59a40848f1c4b5ca15a2f392647d65e1199008eaaff" exitCode=0 Nov 24 08:22:49 crc kubenswrapper[4691]: I1124 08:22:49.209407 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hgt8c" event={"ID":"6b572769-5de8-4008-8e9a-5b051438b6b4","Type":"ContainerDied","Data":"28c70d8cfdd7ff92d8f1f59a40848f1c4b5ca15a2f392647d65e1199008eaaff"} Nov 24 08:22:50 crc kubenswrapper[4691]: I1124 08:22:50.221733 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hgt8c" event={"ID":"6b572769-5de8-4008-8e9a-5b051438b6b4","Type":"ContainerStarted","Data":"0e852d0c87c8218aa1301677763cabc7e776fc1aee28f5879e607ba469a4ce79"} Nov 24 08:22:51 crc kubenswrapper[4691]: I1124 08:22:51.089589 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:22:51 crc kubenswrapper[4691]: I1124 08:22:51.089659 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:22:51 crc kubenswrapper[4691]: I1124 08:22:51.089706 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:22:51 crc kubenswrapper[4691]: I1124 08:22:51.090330 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon 
failed liveness probe, will be restarted" Nov 24 08:22:51 crc kubenswrapper[4691]: I1124 08:22:51.090394 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" gracePeriod=600 Nov 24 08:22:51 crc kubenswrapper[4691]: E1124 08:22:51.214425 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:22:51 crc kubenswrapper[4691]: I1124 08:22:51.235395 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" exitCode=0 Nov 24 08:22:51 crc kubenswrapper[4691]: I1124 08:22:51.235508 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf"} Nov 24 08:22:51 crc kubenswrapper[4691]: I1124 08:22:51.235555 4691 scope.go:117] "RemoveContainer" containerID="e9457e7b6c4c145ad63f8bf4661b5a86c1fa3b74970f7b54d789669ee7203629" Nov 24 08:22:51 crc kubenswrapper[4691]: I1124 08:22:51.236305 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:22:51 crc kubenswrapper[4691]: E1124 08:22:51.236620 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:22:51 crc kubenswrapper[4691]: I1124 08:22:51.241290 4691 generic.go:334] "Generic (PLEG): container finished" podID="6b572769-5de8-4008-8e9a-5b051438b6b4" containerID="0e852d0c87c8218aa1301677763cabc7e776fc1aee28f5879e607ba469a4ce79" exitCode=0 Nov 24 08:22:51 crc kubenswrapper[4691]: I1124 08:22:51.241339 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hgt8c" event={"ID":"6b572769-5de8-4008-8e9a-5b051438b6b4","Type":"ContainerDied","Data":"0e852d0c87c8218aa1301677763cabc7e776fc1aee28f5879e607ba469a4ce79"} Nov 24 08:22:52 crc kubenswrapper[4691]: I1124 08:22:52.259953 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hgt8c" event={"ID":"6b572769-5de8-4008-8e9a-5b051438b6b4","Type":"ContainerStarted","Data":"a429b06d2e92dccf6756fb059dcd67d7cc38ecf811814143edd4b457875eddbd"} Nov 24 08:22:52 crc kubenswrapper[4691]: I1124 08:22:52.302491 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hgt8c" podStartSLOduration=2.885539339 podStartE2EDuration="5.302466584s" 
podCreationTimestamp="2025-11-24 08:22:47 +0000 UTC" firstStartedPulling="2025-11-24 08:22:49.212694873 +0000 UTC m=+1531.211644162" lastFinishedPulling="2025-11-24 08:22:51.629622158 +0000 UTC m=+1533.628571407" observedRunningTime="2025-11-24 08:22:52.290701737 +0000 UTC m=+1534.289651006" watchObservedRunningTime="2025-11-24 08:22:52.302466584 +0000 UTC m=+1534.301415833" Nov 24 08:22:57 crc kubenswrapper[4691]: I1124 08:22:57.044075 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-2q47m"] Nov 24 08:22:57 crc kubenswrapper[4691]: I1124 08:22:57.054320 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-2q47m"] Nov 24 08:22:57 crc kubenswrapper[4691]: I1124 08:22:57.637860 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:57 crc kubenswrapper[4691]: I1124 08:22:57.637983 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:57 crc kubenswrapper[4691]: I1124 08:22:57.702201 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:58 crc kubenswrapper[4691]: I1124 08:22:58.483988 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:22:58 crc kubenswrapper[4691]: I1124 08:22:58.531913 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hgt8c"] Nov 24 08:22:58 crc kubenswrapper[4691]: I1124 08:22:58.782474 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18752e43-a39c-4e17-bf83-831b8361d976" path="/var/lib/kubelet/pods/18752e43-a39c-4e17-bf83-831b8361d976/volumes" Nov 24 08:23:00 crc kubenswrapper[4691]: I1124 08:23:00.455154 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hgt8c" podUID="6b572769-5de8-4008-8e9a-5b051438b6b4" containerName="registry-server" containerID="cri-o://a429b06d2e92dccf6756fb059dcd67d7cc38ecf811814143edd4b457875eddbd" gracePeriod=2 Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.466047 4691 generic.go:334] "Generic (PLEG): container finished" podID="6b572769-5de8-4008-8e9a-5b051438b6b4" containerID="a429b06d2e92dccf6756fb059dcd67d7cc38ecf811814143edd4b457875eddbd" exitCode=0 Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.466126 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hgt8c" event={"ID":"6b572769-5de8-4008-8e9a-5b051438b6b4","Type":"ContainerDied","Data":"a429b06d2e92dccf6756fb059dcd67d7cc38ecf811814143edd4b457875eddbd"} Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.466764 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hgt8c" event={"ID":"6b572769-5de8-4008-8e9a-5b051438b6b4","Type":"ContainerDied","Data":"e5911d0f814b934e9935da5fef8fe18026308af03f90c455251ff05310a36c3f"} Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.466781 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5911d0f814b934e9935da5fef8fe18026308af03f90c455251ff05310a36c3f" Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.535260 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.718699 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmrzx\" (UniqueName: \"kubernetes.io/projected/6b572769-5de8-4008-8e9a-5b051438b6b4-kube-api-access-bmrzx\") pod \"6b572769-5de8-4008-8e9a-5b051438b6b4\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.718759 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-utilities\") pod \"6b572769-5de8-4008-8e9a-5b051438b6b4\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.718946 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-catalog-content\") pod \"6b572769-5de8-4008-8e9a-5b051438b6b4\" (UID: \"6b572769-5de8-4008-8e9a-5b051438b6b4\") " Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.720252 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-utilities" (OuterVolumeSpecName: "utilities") pod "6b572769-5de8-4008-8e9a-5b051438b6b4" (UID: "6b572769-5de8-4008-8e9a-5b051438b6b4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.728168 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b572769-5de8-4008-8e9a-5b051438b6b4-kube-api-access-bmrzx" (OuterVolumeSpecName: "kube-api-access-bmrzx") pod "6b572769-5de8-4008-8e9a-5b051438b6b4" (UID: "6b572769-5de8-4008-8e9a-5b051438b6b4"). InnerVolumeSpecName "kube-api-access-bmrzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.737895 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6b572769-5de8-4008-8e9a-5b051438b6b4" (UID: "6b572769-5de8-4008-8e9a-5b051438b6b4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.821397 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.821462 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmrzx\" (UniqueName: \"kubernetes.io/projected/6b572769-5de8-4008-8e9a-5b051438b6b4-kube-api-access-bmrzx\") on node \"crc\" DevicePath \"\"" Nov 24 08:23:01 crc kubenswrapper[4691]: I1124 08:23:01.821480 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b572769-5de8-4008-8e9a-5b051438b6b4-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:23:02 crc kubenswrapper[4691]: I1124 08:23:02.474812 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hgt8c" Nov 24 08:23:02 crc kubenswrapper[4691]: I1124 08:23:02.510025 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hgt8c"] Nov 24 08:23:02 crc kubenswrapper[4691]: I1124 08:23:02.519081 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hgt8c"] Nov 24 08:23:02 crc kubenswrapper[4691]: I1124 08:23:02.772827 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b572769-5de8-4008-8e9a-5b051438b6b4" path="/var/lib/kubelet/pods/6b572769-5de8-4008-8e9a-5b051438b6b4/volumes" Nov 24 08:23:06 crc kubenswrapper[4691]: I1124 08:23:06.761561 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:23:06 crc kubenswrapper[4691]: E1124 08:23:06.762358 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:23:09 crc kubenswrapper[4691]: I1124 08:23:09.559387 4691 generic.go:334] "Generic (PLEG): container finished" podID="460ba73d-0917-4b4c-8ca1-141a72e6b3e4" containerID="3a02abe3e32e6808830813b17b047c89514a793f5176904eb578f5d580c8c9a1" exitCode=0 Nov 24 08:23:09 crc kubenswrapper[4691]: I1124 08:23:09.559496 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" event={"ID":"460ba73d-0917-4b4c-8ca1-141a72e6b3e4","Type":"ContainerDied","Data":"3a02abe3e32e6808830813b17b047c89514a793f5176904eb578f5d580c8c9a1"} Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.049823 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-fpjgm"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.062968 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-11b5-account-create-bbtld"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.084390 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-ad5e-account-create-vkzqn"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.098006 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-cr6nx"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.107461 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-cr6nx"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.115987 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-ad5e-account-create-vkzqn"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.124494 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-fpjgm"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.131088 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-11b5-account-create-bbtld"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.138737 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-dsgr5"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.147566 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/barbican-3e08-account-create-wn9m9"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.154783 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-dsgr5"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.162690 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-3e08-account-create-wn9m9"] Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.777526 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e0893d9-ac7b-472f-8fc9-ab5dffccf750" path="/var/lib/kubelet/pods/0e0893d9-ac7b-472f-8fc9-ab5dffccf750/volumes" Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.779243 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2268415a-bc56-4f19-8d6e-57c09bc60145" path="/var/lib/kubelet/pods/2268415a-bc56-4f19-8d6e-57c09bc60145/volumes" Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.780584 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28b9ab5c-c3db-4418-a996-bf6da7141bba" path="/var/lib/kubelet/pods/28b9ab5c-c3db-4418-a996-bf6da7141bba/volumes" Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.782174 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec830eac-ff50-4522-a9ec-1a6c9870859d" path="/var/lib/kubelet/pods/ec830eac-ff50-4522-a9ec-1a6c9870859d/volumes" Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.785124 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f079598d-3f79-401a-aada-3f81a4fc3555" path="/var/lib/kubelet/pods/f079598d-3f79-401a-aada-3f81a4fc3555/volumes" Nov 24 08:23:10 crc kubenswrapper[4691]: I1124 08:23:10.786543 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4e87f52-369f-4a4f-9d3b-7e430dbac208" path="/var/lib/kubelet/pods/f4e87f52-369f-4a4f-9d3b-7e430dbac208/volumes" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.139168 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.231570 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-inventory\") pod \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\" (UID: \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.231809 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-ssh-key\") pod \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\" (UID: \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.231894 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbf2z\" (UniqueName: \"kubernetes.io/projected/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-kube-api-access-dbf2z\") pod \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\" (UID: \"460ba73d-0917-4b4c-8ca1-141a72e6b3e4\") " Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.237401 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-kube-api-access-dbf2z" (OuterVolumeSpecName: "kube-api-access-dbf2z") pod "460ba73d-0917-4b4c-8ca1-141a72e6b3e4" (UID: "460ba73d-0917-4b4c-8ca1-141a72e6b3e4"). InnerVolumeSpecName "kube-api-access-dbf2z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.264976 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-inventory" (OuterVolumeSpecName: "inventory") pod "460ba73d-0917-4b4c-8ca1-141a72e6b3e4" (UID: "460ba73d-0917-4b4c-8ca1-141a72e6b3e4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.273480 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "460ba73d-0917-4b4c-8ca1-141a72e6b3e4" (UID: "460ba73d-0917-4b4c-8ca1-141a72e6b3e4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.334746 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.334854 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbf2z\" (UniqueName: \"kubernetes.io/projected/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-kube-api-access-dbf2z\") on node \"crc\" DevicePath \"\"" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.334874 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/460ba73d-0917-4b4c-8ca1-141a72e6b3e4-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.580724 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" event={"ID":"460ba73d-0917-4b4c-8ca1-141a72e6b3e4","Type":"ContainerDied","Data":"d06af14aa1436169f0b8568b96140ba93c4395d7ca1fd422447061db3c5a4a3d"} Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.581048 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d06af14aa1436169f0b8568b96140ba93c4395d7ca1fd422447061db3c5a4a3d" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.580830 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-svnp5" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.691829 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv"] Nov 24 08:23:11 crc kubenswrapper[4691]: E1124 08:23:11.692655 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="460ba73d-0917-4b4c-8ca1-141a72e6b3e4" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.692701 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="460ba73d-0917-4b4c-8ca1-141a72e6b3e4" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 24 08:23:11 crc kubenswrapper[4691]: E1124 08:23:11.692743 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b572769-5de8-4008-8e9a-5b051438b6b4" containerName="extract-content" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.692761 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b572769-5de8-4008-8e9a-5b051438b6b4" containerName="extract-content" Nov 24 08:23:11 crc kubenswrapper[4691]: E1124 08:23:11.692799 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b572769-5de8-4008-8e9a-5b051438b6b4" containerName="registry-server" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.692816 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b572769-5de8-4008-8e9a-5b051438b6b4" containerName="registry-server" Nov 24 08:23:11 crc kubenswrapper[4691]: E1124 08:23:11.692871 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b572769-5de8-4008-8e9a-5b051438b6b4" containerName="extract-utilities" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.692888 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b572769-5de8-4008-8e9a-5b051438b6b4" containerName="extract-utilities" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.693369 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b572769-5de8-4008-8e9a-5b051438b6b4" containerName="registry-server" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.693430 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="460ba73d-0917-4b4c-8ca1-141a72e6b3e4" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.694693 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.696971 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.698357 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.698430 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.698529 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.704922 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv"] Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.743723 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-764zv\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.743952 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrgmh\" (UniqueName: \"kubernetes.io/projected/7a0ce3be-4dc4-4451-979d-0f8a4372e061-kube-api-access-wrgmh\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-764zv\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.744002 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-764zv\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.857313 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrgmh\" (UniqueName: \"kubernetes.io/projected/7a0ce3be-4dc4-4451-979d-0f8a4372e061-kube-api-access-wrgmh\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-764zv\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.857469 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-764zv\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.857509 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-ssh-key\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-764zv\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.871969 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-764zv\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.872787 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-764zv\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:23:11 crc kubenswrapper[4691]: I1124 08:23:11.881182 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrgmh\" (UniqueName: \"kubernetes.io/projected/7a0ce3be-4dc4-4451-979d-0f8a4372e061-kube-api-access-wrgmh\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-764zv\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:23:12 crc kubenswrapper[4691]: I1124 08:23:12.028597 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:23:12 crc kubenswrapper[4691]: I1124 08:23:12.412215 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv"] Nov 24 08:23:12 crc kubenswrapper[4691]: I1124 08:23:12.413928 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 08:23:12 crc kubenswrapper[4691]: I1124 08:23:12.589937 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" event={"ID":"7a0ce3be-4dc4-4451-979d-0f8a4372e061","Type":"ContainerStarted","Data":"143142a97ac77ccec443bb36e21e7b94780533da02fece900c2d1944944f1f75"} Nov 24 08:23:13 crc kubenswrapper[4691]: I1124 08:23:13.054925 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-l7k7s"] Nov 24 08:23:13 crc kubenswrapper[4691]: I1124 08:23:13.066199 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-l7k7s"] Nov 24 08:23:13 crc kubenswrapper[4691]: I1124 08:23:13.607040 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" event={"ID":"7a0ce3be-4dc4-4451-979d-0f8a4372e061","Type":"ContainerStarted","Data":"f6b84b7ec87b02f6aed2fc16f6a12a764573339324bcab96c66fa70cf7bf0f47"} Nov 24 08:23:13 crc kubenswrapper[4691]: I1124 08:23:13.628469 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" podStartSLOduration=2.131906253 podStartE2EDuration="2.628417732s" podCreationTimestamp="2025-11-24 08:23:11 +0000 UTC" firstStartedPulling="2025-11-24 08:23:12.413688415 +0000 UTC m=+1554.412637664" lastFinishedPulling="2025-11-24 08:23:12.910199854 
+0000 UTC m=+1554.909149143" observedRunningTime="2025-11-24 08:23:13.623328986 +0000 UTC m=+1555.622278295" watchObservedRunningTime="2025-11-24 08:23:13.628417732 +0000 UTC m=+1555.627366991" Nov 24 08:23:14 crc kubenswrapper[4691]: I1124 08:23:14.777873 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0eeca96b-473b-466d-92bd-a1c3fdd22dac" path="/var/lib/kubelet/pods/0eeca96b-473b-466d-92bd-a1c3fdd22dac/volumes" Nov 24 08:23:20 crc kubenswrapper[4691]: I1124 08:23:20.761786 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:23:20 crc kubenswrapper[4691]: E1124 08:23:20.763124 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.330609 4691 scope.go:117] "RemoveContainer" containerID="5b6af859292ef981db8c91f78740d2480e285acb102629cbf0c98d1978f57809" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.373797 4691 scope.go:117] "RemoveContainer" containerID="5310e276bba6b5674b4fce5cc4162f1271ecba754d4ffbf745b698aa6ef58d85" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.447302 4691 scope.go:117] "RemoveContainer" containerID="98cf59f60c9aeb64f6d56ab4e649b05517cd8ef25e5eda80bb6a36192131f490" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.494935 4691 scope.go:117] "RemoveContainer" containerID="7010ab2affce9184727a1c99de5866f8009be86e52e9154027f161543b10f4c6" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.538155 4691 scope.go:117] "RemoveContainer" containerID="023df6d958e8e53c7f7fe9b6d3f94c2f1a2130fae6d79ff982ea23ab4b4b6323" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.585606 4691 scope.go:117] "RemoveContainer" containerID="52b240dddc8b3e0e59777ef1e957167213657af7e54590b02aa020cc75dedf91" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.639802 4691 scope.go:117] "RemoveContainer" containerID="28dd174184bd6b03d20c693f712f859e498b4627a0ca65c49b043f761e796e17" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.691850 4691 scope.go:117] "RemoveContainer" containerID="37b4cd83135c3b23844154305c718d0813b2d753b757b3109e43bc22a35f7468" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.729983 4691 scope.go:117] "RemoveContainer" containerID="22da12dae1417be7880c317676aa0973d20bdd55a2ab69418c94a8cdc852fe92" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.748545 4691 scope.go:117] "RemoveContainer" containerID="229545ef18ae187001be86fc5cccd8cf4173dd8de0e2605703b7161d4581c422" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.782281 4691 scope.go:117] "RemoveContainer" containerID="417161d17090e868f1ee6b16e362945763298a2a58ba4f0edf696cae1c28bff8" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.802945 4691 scope.go:117] "RemoveContainer" containerID="30777a7f80a51c4a40862fa2242b4fe079d0b51f4ef1456fe6e0a758957b7044" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.823814 4691 scope.go:117] "RemoveContainer" containerID="7d68e93e90b64a1f520a5078c231ec36df136e428aab05d343e0295b7efebd84" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.846170 4691 scope.go:117] "RemoveContainer" 
containerID="ef6a16148c5d039194bbc38f645fc84819158a17e73be2ea9035029ff79c2bbd" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.873007 4691 scope.go:117] "RemoveContainer" containerID="92a37b08e7d4f94854eeeea4cfa07de7639a21088c70615b2e62d2626c6e2f04" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.899285 4691 scope.go:117] "RemoveContainer" containerID="d2043d5361ff87e2a2d9bfd8e05d81cdf277a395feb5d8066f0cef445fcd6d75" Nov 24 08:23:21 crc kubenswrapper[4691]: I1124 08:23:21.919902 4691 scope.go:117] "RemoveContainer" containerID="5b50b74e2accae29b82dd33ed0b29f4886af782fefb71bf61c0d8437fe5dec03" Nov 24 08:23:31 crc kubenswrapper[4691]: I1124 08:23:31.760782 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:23:31 crc kubenswrapper[4691]: E1124 08:23:31.762082 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:23:46 crc kubenswrapper[4691]: I1124 08:23:46.075789 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-4jnwp"] Nov 24 08:23:46 crc kubenswrapper[4691]: I1124 08:23:46.084632 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-4jnwp"] Nov 24 08:23:46 crc kubenswrapper[4691]: I1124 08:23:46.761213 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:23:46 crc kubenswrapper[4691]: E1124 08:23:46.761789 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:23:46 crc kubenswrapper[4691]: I1124 08:23:46.782690 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4bd742d-a7a1-402b-b1fa-9dde10e15952" path="/var/lib/kubelet/pods/e4bd742d-a7a1-402b-b1fa-9dde10e15952/volumes" Nov 24 08:23:57 crc kubenswrapper[4691]: I1124 08:23:57.033629 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-tx7zx"] Nov 24 08:23:57 crc kubenswrapper[4691]: I1124 08:23:57.042621 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-tx7zx"] Nov 24 08:23:58 crc kubenswrapper[4691]: I1124 08:23:58.774534 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1c22b4d-4593-461a-9096-f81674b136b7" path="/var/lib/kubelet/pods/d1c22b4d-4593-461a-9096-f81674b136b7/volumes" Nov 24 08:23:59 crc kubenswrapper[4691]: I1124 08:23:59.031387 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-qvhpm"] Nov 24 08:23:59 crc kubenswrapper[4691]: I1124 08:23:59.038497 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-8g8rz"] Nov 24 08:23:59 crc kubenswrapper[4691]: I1124 08:23:59.046095 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/barbican-db-sync-qvhpm"] Nov 24 08:23:59 crc kubenswrapper[4691]: I1124 08:23:59.054158 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-8g8rz"] Nov 24 08:24:00 crc kubenswrapper[4691]: I1124 08:24:00.761770 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:24:00 crc kubenswrapper[4691]: E1124 08:24:00.762476 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:24:00 crc kubenswrapper[4691]: I1124 08:24:00.774272 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09239709-f618-437f-a720-070aff572294" path="/var/lib/kubelet/pods/09239709-f618-437f-a720-070aff572294/volumes" Nov 24 08:24:00 crc kubenswrapper[4691]: I1124 08:24:00.777268 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="faf5645f-a25c-4bde-9769-51e1681b7eba" path="/var/lib/kubelet/pods/faf5645f-a25c-4bde-9769-51e1681b7eba/volumes" Nov 24 08:24:08 crc kubenswrapper[4691]: I1124 08:24:08.055475 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-vsrzv"] Nov 24 08:24:08 crc kubenswrapper[4691]: I1124 08:24:08.069622 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-vsrzv"] Nov 24 08:24:08 crc kubenswrapper[4691]: I1124 08:24:08.785922 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29ff644c-aef6-4092-9dcf-1b4562e662d4" path="/var/lib/kubelet/pods/29ff644c-aef6-4092-9dcf-1b4562e662d4/volumes" Nov 24 08:24:15 crc kubenswrapper[4691]: I1124 08:24:15.762617 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:24:15 crc kubenswrapper[4691]: E1124 08:24:15.763696 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:24:22 crc kubenswrapper[4691]: I1124 08:24:22.170482 4691 scope.go:117] "RemoveContainer" containerID="887c882bd1c813a99b7ee9710fdb9ffcb4a7f44ce0c7818337a53a27ae5d63ad" Nov 24 08:24:22 crc kubenswrapper[4691]: I1124 08:24:22.235228 4691 scope.go:117] "RemoveContainer" containerID="50c8d054ee05ed39c26785d05b501867d1dcf4ff7d3640dbd3b26d4295f2a892" Nov 24 08:24:22 crc kubenswrapper[4691]: I1124 08:24:22.285773 4691 scope.go:117] "RemoveContainer" containerID="f47dc7fc96bcd12e987f996a20a019c09ad4dc94d54b28f91317c714fd6479dc" Nov 24 08:24:22 crc kubenswrapper[4691]: I1124 08:24:22.345701 4691 scope.go:117] "RemoveContainer" containerID="ece511d3f226f171956b90efd3e6ccc5790855743bad985a4cf5a2e3be617515" Nov 24 08:24:22 crc kubenswrapper[4691]: I1124 08:24:22.372757 4691 scope.go:117] "RemoveContainer" containerID="3acae0443029415644f3322c8e8498e0a985411835b7279f62aa62e60b852762" Nov 24 08:24:24 crc kubenswrapper[4691]: I1124 
08:24:24.411119 4691 generic.go:334] "Generic (PLEG): container finished" podID="7a0ce3be-4dc4-4451-979d-0f8a4372e061" containerID="f6b84b7ec87b02f6aed2fc16f6a12a764573339324bcab96c66fa70cf7bf0f47" exitCode=0 Nov 24 08:24:24 crc kubenswrapper[4691]: I1124 08:24:24.411207 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" event={"ID":"7a0ce3be-4dc4-4451-979d-0f8a4372e061","Type":"ContainerDied","Data":"f6b84b7ec87b02f6aed2fc16f6a12a764573339324bcab96c66fa70cf7bf0f47"} Nov 24 08:24:25 crc kubenswrapper[4691]: I1124 08:24:25.877938 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.031821 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-inventory\") pod \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.031903 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-ssh-key\") pod \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.031950 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrgmh\" (UniqueName: \"kubernetes.io/projected/7a0ce3be-4dc4-4451-979d-0f8a4372e061-kube-api-access-wrgmh\") pod \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\" (UID: \"7a0ce3be-4dc4-4451-979d-0f8a4372e061\") " Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.047174 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a0ce3be-4dc4-4451-979d-0f8a4372e061-kube-api-access-wrgmh" (OuterVolumeSpecName: "kube-api-access-wrgmh") pod "7a0ce3be-4dc4-4451-979d-0f8a4372e061" (UID: "7a0ce3be-4dc4-4451-979d-0f8a4372e061"). InnerVolumeSpecName "kube-api-access-wrgmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.074198 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-inventory" (OuterVolumeSpecName: "inventory") pod "7a0ce3be-4dc4-4451-979d-0f8a4372e061" (UID: "7a0ce3be-4dc4-4451-979d-0f8a4372e061"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.076755 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7a0ce3be-4dc4-4451-979d-0f8a4372e061" (UID: "7a0ce3be-4dc4-4451-979d-0f8a4372e061"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.134371 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.134422 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a0ce3be-4dc4-4451-979d-0f8a4372e061-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.134432 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrgmh\" (UniqueName: \"kubernetes.io/projected/7a0ce3be-4dc4-4451-979d-0f8a4372e061-kube-api-access-wrgmh\") on node \"crc\" DevicePath \"\"" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.451859 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" event={"ID":"7a0ce3be-4dc4-4451-979d-0f8a4372e061","Type":"ContainerDied","Data":"143142a97ac77ccec443bb36e21e7b94780533da02fece900c2d1944944f1f75"} Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.451925 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="143142a97ac77ccec443bb36e21e7b94780533da02fece900c2d1944944f1f75" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.452029 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-764zv" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.627154 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4"] Nov 24 08:24:26 crc kubenswrapper[4691]: E1124 08:24:26.627942 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a0ce3be-4dc4-4451-979d-0f8a4372e061" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.627997 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a0ce3be-4dc4-4451-979d-0f8a4372e061" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.628511 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a0ce3be-4dc4-4451-979d-0f8a4372e061" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.629779 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.631995 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.632220 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.632891 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.633196 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.650069 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4"] Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.747262 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.747319 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.747574 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9492\" (UniqueName: \"kubernetes.io/projected/fb487f8d-8df8-4b2d-9b08-647a942d8559-kube-api-access-c9492\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.761744 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:24:26 crc kubenswrapper[4691]: E1124 08:24:26.762272 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.850705 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9492\" (UniqueName: \"kubernetes.io/projected/fb487f8d-8df8-4b2d-9b08-647a942d8559-kube-api-access-c9492\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:26 
crc kubenswrapper[4691]: I1124 08:24:26.851438 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.851942 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.856622 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.860026 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.874745 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9492\" (UniqueName: \"kubernetes.io/projected/fb487f8d-8df8-4b2d-9b08-647a942d8559-kube-api-access-c9492\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:26 crc kubenswrapper[4691]: I1124 08:24:26.958962 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:27 crc kubenswrapper[4691]: I1124 08:24:27.545374 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4"] Nov 24 08:24:28 crc kubenswrapper[4691]: I1124 08:24:28.475661 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" event={"ID":"fb487f8d-8df8-4b2d-9b08-647a942d8559","Type":"ContainerStarted","Data":"daaac8c9ba48a9af1c0f202a7f2ec100c64653ad0de2d75618fefcf21213c1c5"} Nov 24 08:24:28 crc kubenswrapper[4691]: I1124 08:24:28.476143 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" event={"ID":"fb487f8d-8df8-4b2d-9b08-647a942d8559","Type":"ContainerStarted","Data":"8134b2cbe350bccad27e8d3ed12ab82ca58335f0000aaf1f5cb33de474b8c106"} Nov 24 08:24:28 crc kubenswrapper[4691]: I1124 08:24:28.505301 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" podStartSLOduration=2.041497361 podStartE2EDuration="2.505279875s" podCreationTimestamp="2025-11-24 08:24:26 +0000 UTC" firstStartedPulling="2025-11-24 08:24:27.55331419 +0000 UTC m=+1629.552263439" lastFinishedPulling="2025-11-24 08:24:28.017096704 +0000 UTC m=+1630.016045953" observedRunningTime="2025-11-24 08:24:28.496873232 +0000 UTC m=+1630.495822491" watchObservedRunningTime="2025-11-24 08:24:28.505279875 +0000 UTC m=+1630.504229144" Nov 24 08:24:33 crc kubenswrapper[4691]: I1124 08:24:33.546038 4691 generic.go:334] "Generic (PLEG): container finished" podID="fb487f8d-8df8-4b2d-9b08-647a942d8559" containerID="daaac8c9ba48a9af1c0f202a7f2ec100c64653ad0de2d75618fefcf21213c1c5" exitCode=0 Nov 24 08:24:33 crc kubenswrapper[4691]: I1124 08:24:33.546100 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" event={"ID":"fb487f8d-8df8-4b2d-9b08-647a942d8559","Type":"ContainerDied","Data":"daaac8c9ba48a9af1c0f202a7f2ec100c64653ad0de2d75618fefcf21213c1c5"} Nov 24 08:24:34 crc kubenswrapper[4691]: I1124 08:24:34.966001 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.065287 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-inventory\") pod \"fb487f8d-8df8-4b2d-9b08-647a942d8559\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.065404 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-ssh-key\") pod \"fb487f8d-8df8-4b2d-9b08-647a942d8559\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.065524 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9492\" (UniqueName: \"kubernetes.io/projected/fb487f8d-8df8-4b2d-9b08-647a942d8559-kube-api-access-c9492\") pod \"fb487f8d-8df8-4b2d-9b08-647a942d8559\" (UID: \"fb487f8d-8df8-4b2d-9b08-647a942d8559\") " Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.071343 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb487f8d-8df8-4b2d-9b08-647a942d8559-kube-api-access-c9492" (OuterVolumeSpecName: "kube-api-access-c9492") pod "fb487f8d-8df8-4b2d-9b08-647a942d8559" (UID: "fb487f8d-8df8-4b2d-9b08-647a942d8559"). InnerVolumeSpecName "kube-api-access-c9492". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.096255 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-inventory" (OuterVolumeSpecName: "inventory") pod "fb487f8d-8df8-4b2d-9b08-647a942d8559" (UID: "fb487f8d-8df8-4b2d-9b08-647a942d8559"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.102960 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fb487f8d-8df8-4b2d-9b08-647a942d8559" (UID: "fb487f8d-8df8-4b2d-9b08-647a942d8559"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.169082 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.169129 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fb487f8d-8df8-4b2d-9b08-647a942d8559-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.169146 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9492\" (UniqueName: \"kubernetes.io/projected/fb487f8d-8df8-4b2d-9b08-647a942d8559-kube-api-access-c9492\") on node \"crc\" DevicePath \"\"" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.576905 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" event={"ID":"fb487f8d-8df8-4b2d-9b08-647a942d8559","Type":"ContainerDied","Data":"8134b2cbe350bccad27e8d3ed12ab82ca58335f0000aaf1f5cb33de474b8c106"} Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.576957 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8134b2cbe350bccad27e8d3ed12ab82ca58335f0000aaf1f5cb33de474b8c106" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.577004 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.683838 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx"] Nov 24 08:24:35 crc kubenswrapper[4691]: E1124 08:24:35.685677 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb487f8d-8df8-4b2d-9b08-647a942d8559" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.685711 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb487f8d-8df8-4b2d-9b08-647a942d8559" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.686018 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb487f8d-8df8-4b2d-9b08-647a942d8559" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.687041 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.690367 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.690418 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.690659 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.696077 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.703025 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx"] Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.782119 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-dphxx\" (UID: \"5368c577-e1f7-45bf-9102-4e5422934e63\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.782539 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhtfr\" (UniqueName: \"kubernetes.io/projected/5368c577-e1f7-45bf-9102-4e5422934e63-kube-api-access-vhtfr\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-dphxx\" (UID: \"5368c577-e1f7-45bf-9102-4e5422934e63\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.783101 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-dphxx\" (UID: \"5368c577-e1f7-45bf-9102-4e5422934e63\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.885426 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-dphxx\" (UID: \"5368c577-e1f7-45bf-9102-4e5422934e63\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.885528 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-dphxx\" (UID: \"5368c577-e1f7-45bf-9102-4e5422934e63\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.885609 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhtfr\" (UniqueName: \"kubernetes.io/projected/5368c577-e1f7-45bf-9102-4e5422934e63-kube-api-access-vhtfr\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-dphxx\" (UID: 
\"5368c577-e1f7-45bf-9102-4e5422934e63\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.892305 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-dphxx\" (UID: \"5368c577-e1f7-45bf-9102-4e5422934e63\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.892439 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-dphxx\" (UID: \"5368c577-e1f7-45bf-9102-4e5422934e63\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:24:35 crc kubenswrapper[4691]: I1124 08:24:35.917516 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhtfr\" (UniqueName: \"kubernetes.io/projected/5368c577-e1f7-45bf-9102-4e5422934e63-kube-api-access-vhtfr\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-dphxx\" (UID: \"5368c577-e1f7-45bf-9102-4e5422934e63\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:24:36 crc kubenswrapper[4691]: I1124 08:24:36.018690 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:24:36 crc kubenswrapper[4691]: W1124 08:24:36.555684 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5368c577_e1f7_45bf_9102_4e5422934e63.slice/crio-4017a4a64e0bd3a7576ba3d8dbdc245d166916dad5c6c8e6e594509e20bd2a0c WatchSource:0}: Error finding container 4017a4a64e0bd3a7576ba3d8dbdc245d166916dad5c6c8e6e594509e20bd2a0c: Status 404 returned error can't find the container with id 4017a4a64e0bd3a7576ba3d8dbdc245d166916dad5c6c8e6e594509e20bd2a0c Nov 24 08:24:36 crc kubenswrapper[4691]: I1124 08:24:36.559654 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx"] Nov 24 08:24:36 crc kubenswrapper[4691]: I1124 08:24:36.590233 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" event={"ID":"5368c577-e1f7-45bf-9102-4e5422934e63","Type":"ContainerStarted","Data":"4017a4a64e0bd3a7576ba3d8dbdc245d166916dad5c6c8e6e594509e20bd2a0c"} Nov 24 08:24:37 crc kubenswrapper[4691]: I1124 08:24:37.602778 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" event={"ID":"5368c577-e1f7-45bf-9102-4e5422934e63","Type":"ContainerStarted","Data":"c4dafea89004aeedd8072020e4cee19e9fa23715a853d51588f1d406085d3532"} Nov 24 08:24:37 crc kubenswrapper[4691]: I1124 08:24:37.640223 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" podStartSLOduration=2.206082642 podStartE2EDuration="2.640182536s" podCreationTimestamp="2025-11-24 08:24:35 +0000 UTC" firstStartedPulling="2025-11-24 08:24:36.557612558 +0000 UTC m=+1638.556561807" lastFinishedPulling="2025-11-24 08:24:36.991712452 +0000 UTC m=+1638.990661701" observedRunningTime="2025-11-24 08:24:37.621623678 +0000 UTC 
m=+1639.620572967" watchObservedRunningTime="2025-11-24 08:24:37.640182536 +0000 UTC m=+1639.639131825" Nov 24 08:24:41 crc kubenswrapper[4691]: I1124 08:24:41.760939 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:24:41 crc kubenswrapper[4691]: E1124 08:24:41.761882 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:24:53 crc kubenswrapper[4691]: I1124 08:24:53.761411 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:24:53 crc kubenswrapper[4691]: E1124 08:24:53.762406 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.065158 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-hs4vf"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.091388 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-857b-account-create-trvmc"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.100945 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-wqkdf"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.107628 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-5ab9-account-create-xpsht"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.114904 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-5ab9-account-create-xpsht"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.125964 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-857b-account-create-trvmc"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.135723 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-hs4vf"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.147252 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-wqkdf"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.157689 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-8wsc6"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.167085 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-8wsc6"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.176656 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-8a87-account-create-kq8md"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.184735 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-8a87-account-create-kq8md"] Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.776671 4691 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="11d10376-5f07-4a8f-bca5-6ee8172f886f" path="/var/lib/kubelet/pods/11d10376-5f07-4a8f-bca5-6ee8172f886f/volumes" Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.777385 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a07bbfa-86c4-40b0-aebd-684592d41663" path="/var/lib/kubelet/pods/3a07bbfa-86c4-40b0-aebd-684592d41663/volumes" Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.778353 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4782846a-9f12-486c-b56f-137ef67dc92c" path="/var/lib/kubelet/pods/4782846a-9f12-486c-b56f-137ef67dc92c/volumes" Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.779097 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="543a3fab-95e8-46c7-b566-b6b394749681" path="/var/lib/kubelet/pods/543a3fab-95e8-46c7-b566-b6b394749681/volumes" Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.780626 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc91f7cc-3f75-46d8-b521-30dc169ab022" path="/var/lib/kubelet/pods/cc91f7cc-3f75-46d8-b521-30dc169ab022/volumes" Nov 24 08:25:00 crc kubenswrapper[4691]: I1124 08:25:00.781349 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db6ac827-d728-40ca-bc93-b8f406242a9d" path="/var/lib/kubelet/pods/db6ac827-d728-40ca-bc93-b8f406242a9d/volumes" Nov 24 08:25:06 crc kubenswrapper[4691]: I1124 08:25:06.760653 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:25:06 crc kubenswrapper[4691]: E1124 08:25:06.761915 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:25:18 crc kubenswrapper[4691]: I1124 08:25:18.030264 4691 generic.go:334] "Generic (PLEG): container finished" podID="5368c577-e1f7-45bf-9102-4e5422934e63" containerID="c4dafea89004aeedd8072020e4cee19e9fa23715a853d51588f1d406085d3532" exitCode=0 Nov 24 08:25:18 crc kubenswrapper[4691]: I1124 08:25:18.030486 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" event={"ID":"5368c577-e1f7-45bf-9102-4e5422934e63","Type":"ContainerDied","Data":"c4dafea89004aeedd8072020e4cee19e9fa23715a853d51588f1d406085d3532"} Nov 24 08:25:19 crc kubenswrapper[4691]: I1124 08:25:19.452314 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:25:19 crc kubenswrapper[4691]: I1124 08:25:19.643374 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-ssh-key\") pod \"5368c577-e1f7-45bf-9102-4e5422934e63\" (UID: \"5368c577-e1f7-45bf-9102-4e5422934e63\") " Nov 24 08:25:19 crc kubenswrapper[4691]: I1124 08:25:19.643428 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-inventory\") pod \"5368c577-e1f7-45bf-9102-4e5422934e63\" (UID: \"5368c577-e1f7-45bf-9102-4e5422934e63\") " Nov 24 08:25:19 crc kubenswrapper[4691]: I1124 08:25:19.643585 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhtfr\" (UniqueName: \"kubernetes.io/projected/5368c577-e1f7-45bf-9102-4e5422934e63-kube-api-access-vhtfr\") pod \"5368c577-e1f7-45bf-9102-4e5422934e63\" (UID: \"5368c577-e1f7-45bf-9102-4e5422934e63\") " Nov 24 08:25:19 crc kubenswrapper[4691]: I1124 08:25:19.653890 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5368c577-e1f7-45bf-9102-4e5422934e63-kube-api-access-vhtfr" (OuterVolumeSpecName: "kube-api-access-vhtfr") pod "5368c577-e1f7-45bf-9102-4e5422934e63" (UID: "5368c577-e1f7-45bf-9102-4e5422934e63"). InnerVolumeSpecName "kube-api-access-vhtfr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:25:19 crc kubenswrapper[4691]: I1124 08:25:19.671204 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5368c577-e1f7-45bf-9102-4e5422934e63" (UID: "5368c577-e1f7-45bf-9102-4e5422934e63"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:25:19 crc kubenswrapper[4691]: I1124 08:25:19.678484 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-inventory" (OuterVolumeSpecName: "inventory") pod "5368c577-e1f7-45bf-9102-4e5422934e63" (UID: "5368c577-e1f7-45bf-9102-4e5422934e63"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:25:19 crc kubenswrapper[4691]: I1124 08:25:19.746312 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:25:19 crc kubenswrapper[4691]: I1124 08:25:19.747679 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5368c577-e1f7-45bf-9102-4e5422934e63-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:25:19 crc kubenswrapper[4691]: I1124 08:25:19.747710 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhtfr\" (UniqueName: \"kubernetes.io/projected/5368c577-e1f7-45bf-9102-4e5422934e63-kube-api-access-vhtfr\") on node \"crc\" DevicePath \"\"" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.052410 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" event={"ID":"5368c577-e1f7-45bf-9102-4e5422934e63","Type":"ContainerDied","Data":"4017a4a64e0bd3a7576ba3d8dbdc245d166916dad5c6c8e6e594509e20bd2a0c"} Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.052512 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4017a4a64e0bd3a7576ba3d8dbdc245d166916dad5c6c8e6e594509e20bd2a0c" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.052466 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-dphxx" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.177794 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k"] Nov 24 08:25:20 crc kubenswrapper[4691]: E1124 08:25:20.178288 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5368c577-e1f7-45bf-9102-4e5422934e63" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.178307 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="5368c577-e1f7-45bf-9102-4e5422934e63" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.178633 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="5368c577-e1f7-45bf-9102-4e5422934e63" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.179392 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.183244 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.183508 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.187040 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.187384 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.189640 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k"] Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.269703 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k\" (UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.269937 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w97kj\" (UniqueName: \"kubernetes.io/projected/1ac65fef-8c31-48ea-9715-9245e9dd717e-kube-api-access-w97kj\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k\" (UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.270052 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k\" (UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.372401 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k\" (UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.372565 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k\" (UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.372696 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w97kj\" (UniqueName: \"kubernetes.io/projected/1ac65fef-8c31-48ea-9715-9245e9dd717e-kube-api-access-w97kj\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k\" 
(UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.379530 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k\" (UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.380579 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k\" (UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.391801 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w97kj\" (UniqueName: \"kubernetes.io/projected/1ac65fef-8c31-48ea-9715-9245e9dd717e-kube-api-access-w97kj\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k\" (UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.498066 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:25:20 crc kubenswrapper[4691]: I1124 08:25:20.760947 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:25:20 crc kubenswrapper[4691]: E1124 08:25:20.761669 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:25:21 crc kubenswrapper[4691]: I1124 08:25:21.077812 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k"] Nov 24 08:25:22 crc kubenswrapper[4691]: I1124 08:25:22.079145 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" event={"ID":"1ac65fef-8c31-48ea-9715-9245e9dd717e","Type":"ContainerStarted","Data":"5fcc7733634044d4fba3500399255bd48892e3815241d36b8f47c78675bb4fba"} Nov 24 08:25:22 crc kubenswrapper[4691]: I1124 08:25:22.079803 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" event={"ID":"1ac65fef-8c31-48ea-9715-9245e9dd717e","Type":"ContainerStarted","Data":"a5be706c487658cc2df5759913dbfbd548d12c992d47232c7d2dcfb4a7b03383"} Nov 24 08:25:22 crc kubenswrapper[4691]: I1124 08:25:22.107134 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" podStartSLOduration=1.562632903 podStartE2EDuration="2.107112994s" podCreationTimestamp="2025-11-24 08:25:20 +0000 UTC" firstStartedPulling="2025-11-24 08:25:21.093442812 +0000 UTC 
m=+1683.092392071" lastFinishedPulling="2025-11-24 08:25:21.637922903 +0000 UTC m=+1683.636872162" observedRunningTime="2025-11-24 08:25:22.104980332 +0000 UTC m=+1684.103929611" watchObservedRunningTime="2025-11-24 08:25:22.107112994 +0000 UTC m=+1684.106062253" Nov 24 08:25:22 crc kubenswrapper[4691]: I1124 08:25:22.564198 4691 scope.go:117] "RemoveContainer" containerID="dc60e967a5d3c1cda82c0f4a142fdfc691252fceb633a7b4604dff5f228d8dae" Nov 24 08:25:22 crc kubenswrapper[4691]: I1124 08:25:22.602310 4691 scope.go:117] "RemoveContainer" containerID="8fcb2c412e8404c7f61a375310d5c081a8815d57f772be72737350a68295cc4c" Nov 24 08:25:22 crc kubenswrapper[4691]: I1124 08:25:22.648830 4691 scope.go:117] "RemoveContainer" containerID="a00c7a3fe3bd72ac333731e2f13812945fca1d80e299ee94e2c1ae2fb4863623" Nov 24 08:25:22 crc kubenswrapper[4691]: I1124 08:25:22.686886 4691 scope.go:117] "RemoveContainer" containerID="7acc3b98a4aa8a1697f89b8a44664f41e04b87a6a59c86b14d3319e407aac1e2" Nov 24 08:25:22 crc kubenswrapper[4691]: I1124 08:25:22.734070 4691 scope.go:117] "RemoveContainer" containerID="d517adef1acf08ab50499d32756566d943d9c6d06134e62a3cbeb7c9bf2a5839" Nov 24 08:25:22 crc kubenswrapper[4691]: I1124 08:25:22.772053 4691 scope.go:117] "RemoveContainer" containerID="cf4449bd40f19dcc5d0aea32234b0bb5d801fb3c2c4718842a799322d0435d7d" Nov 24 08:25:23 crc kubenswrapper[4691]: I1124 08:25:23.046896 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5bh8t"] Nov 24 08:25:23 crc kubenswrapper[4691]: I1124 08:25:23.059548 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5bh8t"] Nov 24 08:25:24 crc kubenswrapper[4691]: I1124 08:25:24.775198 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35cf516f-ba01-46fe-97d5-36ae7b99f35e" path="/var/lib/kubelet/pods/35cf516f-ba01-46fe-97d5-36ae7b99f35e/volumes" Nov 24 08:25:31 crc kubenswrapper[4691]: I1124 08:25:31.760895 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:25:31 crc kubenswrapper[4691]: E1124 08:25:31.761649 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:25:43 crc kubenswrapper[4691]: I1124 08:25:43.761191 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:25:43 crc kubenswrapper[4691]: E1124 08:25:43.762392 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:25:47 crc kubenswrapper[4691]: I1124 08:25:47.054220 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-kkxd2"] Nov 24 08:25:47 crc kubenswrapper[4691]: I1124 08:25:47.062646 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell1-conductor-db-sync-t62jq"] Nov 24 08:25:47 crc kubenswrapper[4691]: I1124 08:25:47.075331 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-t62jq"] Nov 24 08:25:47 crc kubenswrapper[4691]: I1124 08:25:47.088860 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-kkxd2"] Nov 24 08:25:48 crc kubenswrapper[4691]: I1124 08:25:48.782388 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c4c6180-da05-43a3-9f3b-e689e86cb2ac" path="/var/lib/kubelet/pods/3c4c6180-da05-43a3-9f3b-e689e86cb2ac/volumes" Nov 24 08:25:48 crc kubenswrapper[4691]: I1124 08:25:48.784026 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f83a1f39-5338-46a1-96b8-384c34957916" path="/var/lib/kubelet/pods/f83a1f39-5338-46a1-96b8-384c34957916/volumes" Nov 24 08:25:56 crc kubenswrapper[4691]: I1124 08:25:56.761747 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:25:56 crc kubenswrapper[4691]: E1124 08:25:56.762912 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:26:07 crc kubenswrapper[4691]: I1124 08:26:07.761065 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:26:07 crc kubenswrapper[4691]: E1124 08:26:07.762038 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:26:19 crc kubenswrapper[4691]: I1124 08:26:19.707797 4691 generic.go:334] "Generic (PLEG): container finished" podID="1ac65fef-8c31-48ea-9715-9245e9dd717e" containerID="5fcc7733634044d4fba3500399255bd48892e3815241d36b8f47c78675bb4fba" exitCode=0 Nov 24 08:26:19 crc kubenswrapper[4691]: I1124 08:26:19.707908 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" event={"ID":"1ac65fef-8c31-48ea-9715-9245e9dd717e","Type":"ContainerDied","Data":"5fcc7733634044d4fba3500399255bd48892e3815241d36b8f47c78675bb4fba"} Nov 24 08:26:19 crc kubenswrapper[4691]: I1124 08:26:19.760103 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:26:19 crc kubenswrapper[4691]: E1124 08:26:19.760345 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 
08:26:21.126681 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.301372 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-ssh-key\") pod \"1ac65fef-8c31-48ea-9715-9245e9dd717e\" (UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.302075 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-inventory\") pod \"1ac65fef-8c31-48ea-9715-9245e9dd717e\" (UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.302142 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w97kj\" (UniqueName: \"kubernetes.io/projected/1ac65fef-8c31-48ea-9715-9245e9dd717e-kube-api-access-w97kj\") pod \"1ac65fef-8c31-48ea-9715-9245e9dd717e\" (UID: \"1ac65fef-8c31-48ea-9715-9245e9dd717e\") " Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.309131 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ac65fef-8c31-48ea-9715-9245e9dd717e-kube-api-access-w97kj" (OuterVolumeSpecName: "kube-api-access-w97kj") pod "1ac65fef-8c31-48ea-9715-9245e9dd717e" (UID: "1ac65fef-8c31-48ea-9715-9245e9dd717e"). InnerVolumeSpecName "kube-api-access-w97kj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.333779 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1ac65fef-8c31-48ea-9715-9245e9dd717e" (UID: "1ac65fef-8c31-48ea-9715-9245e9dd717e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.334605 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-inventory" (OuterVolumeSpecName: "inventory") pod "1ac65fef-8c31-48ea-9715-9245e9dd717e" (UID: "1ac65fef-8c31-48ea-9715-9245e9dd717e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.404393 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.404433 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ac65fef-8c31-48ea-9715-9245e9dd717e-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.404455 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w97kj\" (UniqueName: \"kubernetes.io/projected/1ac65fef-8c31-48ea-9715-9245e9dd717e-kube-api-access-w97kj\") on node \"crc\" DevicePath \"\"" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.741830 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" event={"ID":"1ac65fef-8c31-48ea-9715-9245e9dd717e","Type":"ContainerDied","Data":"a5be706c487658cc2df5759913dbfbd548d12c992d47232c7d2dcfb4a7b03383"} Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.741890 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5be706c487658cc2df5759913dbfbd548d12c992d47232c7d2dcfb4a7b03383" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.741898 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.836733 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-6tlcl"] Nov 24 08:26:21 crc kubenswrapper[4691]: E1124 08:26:21.837187 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ac65fef-8c31-48ea-9715-9245e9dd717e" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.837210 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ac65fef-8c31-48ea-9715-9245e9dd717e" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.837531 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ac65fef-8c31-48ea-9715-9245e9dd717e" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.838291 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.840337 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.840498 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.840356 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.841183 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:26:21 crc kubenswrapper[4691]: I1124 08:26:21.858561 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-6tlcl"] Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.016161 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltt2f\" (UniqueName: \"kubernetes.io/projected/c5c8e953-d111-42cd-8930-ee2c8f4242dd-kube-api-access-ltt2f\") pod \"ssh-known-hosts-edpm-deployment-6tlcl\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.016259 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-6tlcl\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.016310 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-6tlcl\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.117972 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-6tlcl\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.118329 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-6tlcl\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.118553 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltt2f\" (UniqueName: \"kubernetes.io/projected/c5c8e953-d111-42cd-8930-ee2c8f4242dd-kube-api-access-ltt2f\") pod \"ssh-known-hosts-edpm-deployment-6tlcl\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:22 crc 
kubenswrapper[4691]: I1124 08:26:22.121716 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-6tlcl\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.124424 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-6tlcl\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.136973 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltt2f\" (UniqueName: \"kubernetes.io/projected/c5c8e953-d111-42cd-8930-ee2c8f4242dd-kube-api-access-ltt2f\") pod \"ssh-known-hosts-edpm-deployment-6tlcl\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.155449 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.748207 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-6tlcl"] Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.921308 4691 scope.go:117] "RemoveContainer" containerID="e3d6efccbbbeabf8dda8a717ad37242b942404986a4e0a65be4fb4ec8119fcb9" Nov 24 08:26:22 crc kubenswrapper[4691]: I1124 08:26:22.979146 4691 scope.go:117] "RemoveContainer" containerID="c01d57cb5ff8a868761f76d113b805887db7f7ae3f13bedc7eb1cde39b854ee6" Nov 24 08:26:23 crc kubenswrapper[4691]: I1124 08:26:23.041560 4691 scope.go:117] "RemoveContainer" containerID="16e51f596472d04c6f21e3412401f30f340be3f7006a2574d3e2522a7b3fe82c" Nov 24 08:26:23 crc kubenswrapper[4691]: I1124 08:26:23.762014 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" event={"ID":"c5c8e953-d111-42cd-8930-ee2c8f4242dd","Type":"ContainerStarted","Data":"882319e3472f764196289c1eb9ec042b7d01b209fa695ec05d6b7c5ad2fd4385"} Nov 24 08:26:23 crc kubenswrapper[4691]: I1124 08:26:23.762061 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" event={"ID":"c5c8e953-d111-42cd-8930-ee2c8f4242dd","Type":"ContainerStarted","Data":"d5db31a259e418f69a1b061a7d9bb6ae74b661ac4a6d52e2f87ac7412967b17f"} Nov 24 08:26:23 crc kubenswrapper[4691]: I1124 08:26:23.787112 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" podStartSLOduration=2.122862613 podStartE2EDuration="2.787088833s" podCreationTimestamp="2025-11-24 08:26:21 +0000 UTC" firstStartedPulling="2025-11-24 08:26:22.753833075 +0000 UTC m=+1744.752782314" lastFinishedPulling="2025-11-24 08:26:23.418059285 +0000 UTC m=+1745.417008534" observedRunningTime="2025-11-24 08:26:23.782049587 +0000 UTC m=+1745.780998826" watchObservedRunningTime="2025-11-24 08:26:23.787088833 +0000 UTC m=+1745.786038092" Nov 24 08:26:31 crc kubenswrapper[4691]: I1124 08:26:31.053722 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell1-cell-mapping-6qk4g"] Nov 24 08:26:31 crc kubenswrapper[4691]: I1124 08:26:31.065581 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-6qk4g"] Nov 24 08:26:31 crc kubenswrapper[4691]: I1124 08:26:31.843881 4691 generic.go:334] "Generic (PLEG): container finished" podID="c5c8e953-d111-42cd-8930-ee2c8f4242dd" containerID="882319e3472f764196289c1eb9ec042b7d01b209fa695ec05d6b7c5ad2fd4385" exitCode=0 Nov 24 08:26:31 crc kubenswrapper[4691]: I1124 08:26:31.843965 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" event={"ID":"c5c8e953-d111-42cd-8930-ee2c8f4242dd","Type":"ContainerDied","Data":"882319e3472f764196289c1eb9ec042b7d01b209fa695ec05d6b7c5ad2fd4385"} Nov 24 08:26:32 crc kubenswrapper[4691]: I1124 08:26:32.761100 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:26:32 crc kubenswrapper[4691]: E1124 08:26:32.761931 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:26:32 crc kubenswrapper[4691]: I1124 08:26:32.778568 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a34df957-8938-4332-9781-bad870fa9531" path="/var/lib/kubelet/pods/a34df957-8938-4332-9781-bad870fa9531/volumes" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.339393 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.368971 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-inventory-0\") pod \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.369062 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ltt2f\" (UniqueName: \"kubernetes.io/projected/c5c8e953-d111-42cd-8930-ee2c8f4242dd-kube-api-access-ltt2f\") pod \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.369124 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-ssh-key-openstack-edpm-ipam\") pod \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\" (UID: \"c5c8e953-d111-42cd-8930-ee2c8f4242dd\") " Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.374880 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5c8e953-d111-42cd-8930-ee2c8f4242dd-kube-api-access-ltt2f" (OuterVolumeSpecName: "kube-api-access-ltt2f") pod "c5c8e953-d111-42cd-8930-ee2c8f4242dd" (UID: "c5c8e953-d111-42cd-8930-ee2c8f4242dd"). InnerVolumeSpecName "kube-api-access-ltt2f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.407839 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "c5c8e953-d111-42cd-8930-ee2c8f4242dd" (UID: "c5c8e953-d111-42cd-8930-ee2c8f4242dd"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.419784 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "c5c8e953-d111-42cd-8930-ee2c8f4242dd" (UID: "c5c8e953-d111-42cd-8930-ee2c8f4242dd"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.470468 4691 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.470500 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ltt2f\" (UniqueName: \"kubernetes.io/projected/c5c8e953-d111-42cd-8930-ee2c8f4242dd-kube-api-access-ltt2f\") on node \"crc\" DevicePath \"\"" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.470511 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c5c8e953-d111-42cd-8930-ee2c8f4242dd-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.863270 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" event={"ID":"c5c8e953-d111-42cd-8930-ee2c8f4242dd","Type":"ContainerDied","Data":"d5db31a259e418f69a1b061a7d9bb6ae74b661ac4a6d52e2f87ac7412967b17f"} Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.863356 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d5db31a259e418f69a1b061a7d9bb6ae74b661ac4a6d52e2f87ac7412967b17f" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.864641 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6tlcl" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.998301 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw"] Nov 24 08:26:33 crc kubenswrapper[4691]: E1124 08:26:33.998848 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5c8e953-d111-42cd-8930-ee2c8f4242dd" containerName="ssh-known-hosts-edpm-deployment" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.998872 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5c8e953-d111-42cd-8930-ee2c8f4242dd" containerName="ssh-known-hosts-edpm-deployment" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.999130 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5c8e953-d111-42cd-8930-ee2c8f4242dd" containerName="ssh-known-hosts-edpm-deployment" Nov 24 08:26:33 crc kubenswrapper[4691]: I1124 08:26:33.999903 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.001955 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.002248 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.002871 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.003065 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.018963 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw"] Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.184116 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fgbtw\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.184199 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fgbtw\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.184275 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq5cc\" (UniqueName: \"kubernetes.io/projected/c306f14b-da97-42e1-87cc-612779e690e7-kube-api-access-fq5cc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fgbtw\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.286797 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fgbtw\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.286911 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fgbtw\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.287027 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq5cc\" (UniqueName: \"kubernetes.io/projected/c306f14b-da97-42e1-87cc-612779e690e7-kube-api-access-fq5cc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fgbtw\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.291054 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fgbtw\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.291302 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fgbtw\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.308653 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq5cc\" (UniqueName: \"kubernetes.io/projected/c306f14b-da97-42e1-87cc-612779e690e7-kube-api-access-fq5cc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-fgbtw\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.315584 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.854146 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw"] Nov 24 08:26:34 crc kubenswrapper[4691]: I1124 08:26:34.891432 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" event={"ID":"c306f14b-da97-42e1-87cc-612779e690e7","Type":"ContainerStarted","Data":"f9ef61647016ecd5faef37a91580780dc2295d016447baf2f48acf659edb8e41"} Nov 24 08:26:35 crc kubenswrapper[4691]: I1124 08:26:35.902535 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" event={"ID":"c306f14b-da97-42e1-87cc-612779e690e7","Type":"ContainerStarted","Data":"e973b776d0b94184948dcbb197eee61fa6f2be87a51db0602b18ec7b5ad4016a"} Nov 24 08:26:35 crc kubenswrapper[4691]: I1124 08:26:35.930048 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" podStartSLOduration=2.497045933 podStartE2EDuration="2.930028365s" podCreationTimestamp="2025-11-24 08:26:33 +0000 UTC" firstStartedPulling="2025-11-24 08:26:34.872122042 +0000 UTC m=+1756.871071301" lastFinishedPulling="2025-11-24 08:26:35.305104454 +0000 UTC m=+1757.304053733" observedRunningTime="2025-11-24 08:26:35.917730309 +0000 UTC m=+1757.916679598" watchObservedRunningTime="2025-11-24 08:26:35.930028365 +0000 UTC m=+1757.928977624" Nov 24 08:26:43 crc kubenswrapper[4691]: I1124 08:26:43.762413 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:26:43 crc kubenswrapper[4691]: E1124 08:26:43.766439 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 24 08:26:43 crc kubenswrapper[4691]: I1124 08:26:43.994424 4691 generic.go:334] "Generic (PLEG): container finished" podID="c306f14b-da97-42e1-87cc-612779e690e7" containerID="e973b776d0b94184948dcbb197eee61fa6f2be87a51db0602b18ec7b5ad4016a" exitCode=0
Nov 24 08:26:43 crc kubenswrapper[4691]: I1124 08:26:43.994503 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" event={"ID":"c306f14b-da97-42e1-87cc-612779e690e7","Type":"ContainerDied","Data":"e973b776d0b94184948dcbb197eee61fa6f2be87a51db0602b18ec7b5ad4016a"}
Nov 24 08:26:45 crc kubenswrapper[4691]: I1124 08:26:45.418393 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw"
Nov 24 08:26:45 crc kubenswrapper[4691]: I1124 08:26:45.459484 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-ssh-key\") pod \"c306f14b-da97-42e1-87cc-612779e690e7\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") "
Nov 24 08:26:45 crc kubenswrapper[4691]: I1124 08:26:45.459606 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fq5cc\" (UniqueName: \"kubernetes.io/projected/c306f14b-da97-42e1-87cc-612779e690e7-kube-api-access-fq5cc\") pod \"c306f14b-da97-42e1-87cc-612779e690e7\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") "
Nov 24 08:26:45 crc kubenswrapper[4691]: I1124 08:26:45.459642 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-inventory\") pod \"c306f14b-da97-42e1-87cc-612779e690e7\" (UID: \"c306f14b-da97-42e1-87cc-612779e690e7\") "
Nov 24 08:26:45 crc kubenswrapper[4691]: I1124 08:26:45.467145 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c306f14b-da97-42e1-87cc-612779e690e7-kube-api-access-fq5cc" (OuterVolumeSpecName: "kube-api-access-fq5cc") pod "c306f14b-da97-42e1-87cc-612779e690e7" (UID: "c306f14b-da97-42e1-87cc-612779e690e7"). InnerVolumeSpecName "kube-api-access-fq5cc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:26:45 crc kubenswrapper[4691]: I1124 08:26:45.493393 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-inventory" (OuterVolumeSpecName: "inventory") pod "c306f14b-da97-42e1-87cc-612779e690e7" (UID: "c306f14b-da97-42e1-87cc-612779e690e7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:26:45 crc kubenswrapper[4691]: I1124 08:26:45.512360 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c306f14b-da97-42e1-87cc-612779e690e7" (UID: "c306f14b-da97-42e1-87cc-612779e690e7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:26:45 crc kubenswrapper[4691]: I1124 08:26:45.562328 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 08:26:45 crc kubenswrapper[4691]: I1124 08:26:45.562366 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fq5cc\" (UniqueName: \"kubernetes.io/projected/c306f14b-da97-42e1-87cc-612779e690e7-kube-api-access-fq5cc\") on node \"crc\" DevicePath \"\""
Nov 24 08:26:45 crc kubenswrapper[4691]: I1124 08:26:45.562380 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c306f14b-da97-42e1-87cc-612779e690e7-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.057321 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw" event={"ID":"c306f14b-da97-42e1-87cc-612779e690e7","Type":"ContainerDied","Data":"f9ef61647016ecd5faef37a91580780dc2295d016447baf2f48acf659edb8e41"}
Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.057623 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9ef61647016ecd5faef37a91580780dc2295d016447baf2f48acf659edb8e41"
Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.057882 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-fgbtw"
Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.113585 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d"]
Nov 24 08:26:46 crc kubenswrapper[4691]: E1124 08:26:46.114109 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c306f14b-da97-42e1-87cc-612779e690e7" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.114129 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c306f14b-da97-42e1-87cc-612779e690e7" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.114411 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="c306f14b-da97-42e1-87cc-612779e690e7" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.115322 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d"
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.118087 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.118352 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.119162 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.119208 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.123259 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d"] Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.183484 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d\" (UID: \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.183715 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d\" (UID: \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.183768 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwk8g\" (UniqueName: \"kubernetes.io/projected/d9a70a19-1e34-4bf7-8b91-ed6df2838313-kube-api-access-rwk8g\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d\" (UID: \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.286088 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d\" (UID: \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.286539 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d\" (UID: \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.286566 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwk8g\" (UniqueName: \"kubernetes.io/projected/d9a70a19-1e34-4bf7-8b91-ed6df2838313-kube-api-access-rwk8g\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d\" (UID: 
\"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.292392 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d\" (UID: \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.292698 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d\" (UID: \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.311183 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwk8g\" (UniqueName: \"kubernetes.io/projected/d9a70a19-1e34-4bf7-8b91-ed6df2838313-kube-api-access-rwk8g\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d\" (UID: \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" Nov 24 08:26:46 crc kubenswrapper[4691]: I1124 08:26:46.455068 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" Nov 24 08:26:47 crc kubenswrapper[4691]: I1124 08:26:47.059203 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d"] Nov 24 08:26:47 crc kubenswrapper[4691]: W1124 08:26:47.068904 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9a70a19_1e34_4bf7_8b91_ed6df2838313.slice/crio-86576ee19884f2be25215f3f1e7c1ab6221b1f74460a728bb47aeec778104fbf WatchSource:0}: Error finding container 86576ee19884f2be25215f3f1e7c1ab6221b1f74460a728bb47aeec778104fbf: Status 404 returned error can't find the container with id 86576ee19884f2be25215f3f1e7c1ab6221b1f74460a728bb47aeec778104fbf Nov 24 08:26:48 crc kubenswrapper[4691]: I1124 08:26:48.079983 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" event={"ID":"d9a70a19-1e34-4bf7-8b91-ed6df2838313","Type":"ContainerStarted","Data":"1cd82dd2f34f500d991084c608f9bef287af43c5cd0b0150c65fdbe44591553f"} Nov 24 08:26:48 crc kubenswrapper[4691]: I1124 08:26:48.080405 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" event={"ID":"d9a70a19-1e34-4bf7-8b91-ed6df2838313","Type":"ContainerStarted","Data":"86576ee19884f2be25215f3f1e7c1ab6221b1f74460a728bb47aeec778104fbf"} Nov 24 08:26:48 crc kubenswrapper[4691]: I1124 08:26:48.111440 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" podStartSLOduration=1.658723538 podStartE2EDuration="2.111408651s" podCreationTimestamp="2025-11-24 08:26:46 +0000 UTC" firstStartedPulling="2025-11-24 08:26:47.072363334 +0000 UTC m=+1769.071312593" lastFinishedPulling="2025-11-24 08:26:47.525048417 +0000 UTC m=+1769.523997706" observedRunningTime="2025-11-24 08:26:48.101300218 +0000 UTC m=+1770.100249507" 
Nov 24 08:26:58 crc kubenswrapper[4691]: I1124 08:26:58.182344 4691 generic.go:334] "Generic (PLEG): container finished" podID="d9a70a19-1e34-4bf7-8b91-ed6df2838313" containerID="1cd82dd2f34f500d991084c608f9bef287af43c5cd0b0150c65fdbe44591553f" exitCode=0
Nov 24 08:26:58 crc kubenswrapper[4691]: I1124 08:26:58.182407 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" event={"ID":"d9a70a19-1e34-4bf7-8b91-ed6df2838313","Type":"ContainerDied","Data":"1cd82dd2f34f500d991084c608f9bef287af43c5cd0b0150c65fdbe44591553f"}
Nov 24 08:26:58 crc kubenswrapper[4691]: I1124 08:26:58.773050 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf"
Nov 24 08:26:58 crc kubenswrapper[4691]: E1124 08:26:58.773346 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:26:59 crc kubenswrapper[4691]: I1124 08:26:59.669232 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d"
Nov 24 08:26:59 crc kubenswrapper[4691]: I1124 08:26:59.762269 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-inventory\") pod \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\" (UID: \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") "
Nov 24 08:26:59 crc kubenswrapper[4691]: I1124 08:26:59.762533 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-ssh-key\") pod \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\" (UID: \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") "
Nov 24 08:26:59 crc kubenswrapper[4691]: I1124 08:26:59.762575 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwk8g\" (UniqueName: \"kubernetes.io/projected/d9a70a19-1e34-4bf7-8b91-ed6df2838313-kube-api-access-rwk8g\") pod \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\" (UID: \"d9a70a19-1e34-4bf7-8b91-ed6df2838313\") "
Nov 24 08:26:59 crc kubenswrapper[4691]: I1124 08:26:59.767422 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9a70a19-1e34-4bf7-8b91-ed6df2838313-kube-api-access-rwk8g" (OuterVolumeSpecName: "kube-api-access-rwk8g") pod "d9a70a19-1e34-4bf7-8b91-ed6df2838313" (UID: "d9a70a19-1e34-4bf7-8b91-ed6df2838313"). InnerVolumeSpecName "kube-api-access-rwk8g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:26:59 crc kubenswrapper[4691]: I1124 08:26:59.789708 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d9a70a19-1e34-4bf7-8b91-ed6df2838313" (UID: "d9a70a19-1e34-4bf7-8b91-ed6df2838313"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:26:59 crc kubenswrapper[4691]: I1124 08:26:59.801489 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-inventory" (OuterVolumeSpecName: "inventory") pod "d9a70a19-1e34-4bf7-8b91-ed6df2838313" (UID: "d9a70a19-1e34-4bf7-8b91-ed6df2838313"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:26:59 crc kubenswrapper[4691]: I1124 08:26:59.864152 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 24 08:26:59 crc kubenswrapper[4691]: I1124 08:26:59.864193 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwk8g\" (UniqueName: \"kubernetes.io/projected/d9a70a19-1e34-4bf7-8b91-ed6df2838313-kube-api-access-rwk8g\") on node \"crc\" DevicePath \"\""
Nov 24 08:26:59 crc kubenswrapper[4691]: I1124 08:26:59.864206 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d9a70a19-1e34-4bf7-8b91-ed6df2838313-inventory\") on node \"crc\" DevicePath \"\""
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.207342 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d" event={"ID":"d9a70a19-1e34-4bf7-8b91-ed6df2838313","Type":"ContainerDied","Data":"86576ee19884f2be25215f3f1e7c1ab6221b1f74460a728bb47aeec778104fbf"}
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.207385 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86576ee19884f2be25215f3f1e7c1ab6221b1f74460a728bb47aeec778104fbf"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.207435 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.294387 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"]
Nov 24 08:27:00 crc kubenswrapper[4691]: E1124 08:27:00.294772 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9a70a19-1e34-4bf7-8b91-ed6df2838313" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.294790 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9a70a19-1e34-4bf7-8b91-ed6df2838313" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.295006 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9a70a19-1e34-4bf7-8b91-ed6df2838313" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.295681 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.301500 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.301545 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.301553 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.301626 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.302144 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.302497 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.302672 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.303513 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.314386 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"] Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.370766 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.370811 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2v65\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-kube-api-access-p2v65\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.370834 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.370859 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.370890 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.371037 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.371102 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.371240 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.371297 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.371356 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.371399 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.371546 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.371602 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.371656 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.472981 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473028 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2v65\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-kube-api-access-p2v65\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473058 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473096 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473136 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473175 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473199 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473252 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473364 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473882 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473910 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473967 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.473996 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.474059 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.477479 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.477639 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.478246 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.478320 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.478805 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.478809 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
"MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.478854 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.479488 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.479973 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.480567 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.480820 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.483094 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.483356 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.497328 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2v65\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-kube-api-access-p2v65\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:00 crc kubenswrapper[4691]: I1124 08:27:00.613642 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:01 crc kubenswrapper[4691]: I1124 08:27:01.309395 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"] Nov 24 08:27:02 crc kubenswrapper[4691]: I1124 08:27:02.227071 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" event={"ID":"81f9a1f9-0d85-4aff-a92f-93e8b36724ff","Type":"ContainerStarted","Data":"f0c536228cb7798fe8969d840ac60881f81bd728b9def6d72cf8a6e5acbd7864"} Nov 24 08:27:02 crc kubenswrapper[4691]: I1124 08:27:02.227486 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" event={"ID":"81f9a1f9-0d85-4aff-a92f-93e8b36724ff","Type":"ContainerStarted","Data":"e3573d2f64ea41280c0ac23cfb58bf73c7fedfb62becece6e576c8b1410ff749"} Nov 24 08:27:03 crc kubenswrapper[4691]: I1124 08:27:03.256280 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" podStartSLOduration=2.643182367 podStartE2EDuration="3.256256086s" podCreationTimestamp="2025-11-24 08:27:00 +0000 UTC" firstStartedPulling="2025-11-24 08:27:01.316406926 +0000 UTC m=+1783.315356175" lastFinishedPulling="2025-11-24 08:27:01.929480635 +0000 UTC m=+1783.928429894" observedRunningTime="2025-11-24 08:27:03.250509659 +0000 UTC m=+1785.249458918" watchObservedRunningTime="2025-11-24 08:27:03.256256086 +0000 UTC m=+1785.255205335" Nov 24 08:27:10 crc kubenswrapper[4691]: I1124 08:27:10.760661 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:27:10 crc kubenswrapper[4691]: E1124 08:27:10.761606 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:27:22 crc kubenswrapper[4691]: I1124 08:27:22.761020 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:27:22 crc kubenswrapper[4691]: E1124 08:27:22.762356 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 24 08:27:23 crc kubenswrapper[4691]: I1124 08:27:23.180536 4691 scope.go:117] "RemoveContainer" containerID="0c3932950cd16601648944e4aa4f993b998c36673ffdc4a799a689a4b5ab1595"
Nov 24 08:27:35 crc kubenswrapper[4691]: I1124 08:27:35.760232 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf"
Nov 24 08:27:35 crc kubenswrapper[4691]: E1124 08:27:35.760899 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:27:42 crc kubenswrapper[4691]: I1124 08:27:42.607530 4691 generic.go:334] "Generic (PLEG): container finished" podID="81f9a1f9-0d85-4aff-a92f-93e8b36724ff" containerID="f0c536228cb7798fe8969d840ac60881f81bd728b9def6d72cf8a6e5acbd7864" exitCode=0
Nov 24 08:27:42 crc kubenswrapper[4691]: I1124 08:27:42.607810 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" event={"ID":"81f9a1f9-0d85-4aff-a92f-93e8b36724ff","Type":"ContainerDied","Data":"f0c536228cb7798fe8969d840ac60881f81bd728b9def6d72cf8a6e5acbd7864"}
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.077219 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb"
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.192258 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ssh-key\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.192682 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-neutron-metadata-combined-ca-bundle\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.192744 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.192814 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-bootstrap-combined-ca-bundle\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.192851 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.192889 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ovn-combined-ca-bundle\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.192963 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2v65\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-kube-api-access-p2v65\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.192993 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.193052 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-inventory\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.193081 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-telemetry-combined-ca-bundle\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.193130 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-repo-setup-combined-ca-bundle\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.193156 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-ovn-default-certs-0\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.193190 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-nova-combined-ca-bundle\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.193214 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-libvirt-combined-ca-bundle\") pod \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\" (UID: \"81f9a1f9-0d85-4aff-a92f-93e8b36724ff\") "
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.199319 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.199607 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.199917 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.200008 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.201545 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-kube-api-access-p2v65" (OuterVolumeSpecName: "kube-api-access-p2v65") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "kube-api-access-p2v65". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.201551 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.202060 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.202536 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.210659 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.210731 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.211506 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.211712 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.236988 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-inventory" (OuterVolumeSpecName: "inventory") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.237345 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "81f9a1f9-0d85-4aff-a92f-93e8b36724ff" (UID: "81f9a1f9-0d85-4aff-a92f-93e8b36724ff"). InnerVolumeSpecName "ssh-key".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.294935 4691 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.295152 4691 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.295265 4691 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.295352 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.295478 4691 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.295578 4691 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.295657 4691 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.295899 4691 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.295997 4691 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.296081 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2v65\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-kube-api-access-p2v65\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.296166 4691 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.296246 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.296331 4691 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.296410 4691 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9a1f9-0d85-4aff-a92f-93e8b36724ff-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.646186 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" event={"ID":"81f9a1f9-0d85-4aff-a92f-93e8b36724ff","Type":"ContainerDied","Data":"e3573d2f64ea41280c0ac23cfb58bf73c7fedfb62becece6e576c8b1410ff749"} Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.646244 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3573d2f64ea41280c0ac23cfb58bf73c7fedfb62becece6e576c8b1410ff749" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.646319 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.758894 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb"] Nov 24 08:27:44 crc kubenswrapper[4691]: E1124 08:27:44.759576 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81f9a1f9-0d85-4aff-a92f-93e8b36724ff" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.759668 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="81f9a1f9-0d85-4aff-a92f-93e8b36724ff" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.760037 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="81f9a1f9-0d85-4aff-a92f-93e8b36724ff" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.761913 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.763925 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.764962 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.764972 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.765139 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.770542 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.774514 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb"] Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.806194 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.806344 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.806391 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.806606 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6c8b6\" (UniqueName: \"kubernetes.io/projected/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-kube-api-access-6c8b6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.806656 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.908496 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.908641 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.908673 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.908745 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6c8b6\" (UniqueName: \"kubernetes.io/projected/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-kube-api-access-6c8b6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.908776 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.909863 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.915267 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.915488 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.916239 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:44 crc kubenswrapper[4691]: I1124 08:27:44.932587 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6c8b6\" (UniqueName: \"kubernetes.io/projected/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-kube-api-access-6c8b6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-zqkwb\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:45 crc kubenswrapper[4691]: I1124 08:27:45.082790 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:27:45 crc kubenswrapper[4691]: I1124 08:27:45.600812 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb"] Nov 24 08:27:45 crc kubenswrapper[4691]: I1124 08:27:45.663639 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" event={"ID":"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0","Type":"ContainerStarted","Data":"346c4a30fefd547651fe75fd4600df1850b5b572c690a55890555096d56585cc"} Nov 24 08:27:46 crc kubenswrapper[4691]: I1124 08:27:46.672944 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" event={"ID":"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0","Type":"ContainerStarted","Data":"b373c57b96bb80dca6c05b208530141e409048d72274530fba782095b6253dac"} Nov 24 08:27:46 crc kubenswrapper[4691]: I1124 08:27:46.708539 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" podStartSLOduration=2.291671577 podStartE2EDuration="2.708511292s" podCreationTimestamp="2025-11-24 08:27:44 +0000 UTC" firstStartedPulling="2025-11-24 08:27:45.611133046 +0000 UTC m=+1827.610082335" lastFinishedPulling="2025-11-24 08:27:46.027972771 +0000 UTC m=+1828.026922050" observedRunningTime="2025-11-24 08:27:46.701820729 +0000 UTC m=+1828.700769978" watchObservedRunningTime="2025-11-24 08:27:46.708511292 +0000 UTC m=+1828.707460551" Nov 24 08:27:49 crc kubenswrapper[4691]: I1124 08:27:49.761640 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:27:49 crc kubenswrapper[4691]: E1124 08:27:49.763087 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:28:01 crc kubenswrapper[4691]: I1124 08:28:01.760775 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:28:02 crc kubenswrapper[4691]: I1124 08:28:02.852093 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"70419c902bba2a0ee14c9bd0fd9567bf6662a2111ab103dd012ed4d7572a55ae"} Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.233599 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nnsrx"] Nov 24 08:28:43 
crc kubenswrapper[4691]: I1124 08:28:43.237926 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.247636 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nnsrx"] Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.328305 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-utilities\") pod \"redhat-operators-nnsrx\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.328440 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-catalog-content\") pod \"redhat-operators-nnsrx\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.328598 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlqxw\" (UniqueName: \"kubernetes.io/projected/112ff422-42b5-4f00-8ab0-c59fd2d1c001-kube-api-access-jlqxw\") pod \"redhat-operators-nnsrx\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.430494 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-utilities\") pod \"redhat-operators-nnsrx\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.430595 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-catalog-content\") pod \"redhat-operators-nnsrx\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.430717 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlqxw\" (UniqueName: \"kubernetes.io/projected/112ff422-42b5-4f00-8ab0-c59fd2d1c001-kube-api-access-jlqxw\") pod \"redhat-operators-nnsrx\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.431038 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-utilities\") pod \"redhat-operators-nnsrx\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.431149 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-catalog-content\") pod \"redhat-operators-nnsrx\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 
08:28:43.450316 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlqxw\" (UniqueName: \"kubernetes.io/projected/112ff422-42b5-4f00-8ab0-c59fd2d1c001-kube-api-access-jlqxw\") pod \"redhat-operators-nnsrx\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.564495 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:43 crc kubenswrapper[4691]: I1124 08:28:43.896401 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nnsrx"] Nov 24 08:28:44 crc kubenswrapper[4691]: I1124 08:28:44.295135 4691 generic.go:334] "Generic (PLEG): container finished" podID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerID="accda20ef52ed82c1965344f8aa89201a2ce11160fe41d7a71c241c8a8410f69" exitCode=0 Nov 24 08:28:44 crc kubenswrapper[4691]: I1124 08:28:44.295196 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnsrx" event={"ID":"112ff422-42b5-4f00-8ab0-c59fd2d1c001","Type":"ContainerDied","Data":"accda20ef52ed82c1965344f8aa89201a2ce11160fe41d7a71c241c8a8410f69"} Nov 24 08:28:44 crc kubenswrapper[4691]: I1124 08:28:44.295467 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnsrx" event={"ID":"112ff422-42b5-4f00-8ab0-c59fd2d1c001","Type":"ContainerStarted","Data":"9013eea361f674fafd5c61aee528562daac3bd6e879349dd16d6d4b7dfd18eaa"} Nov 24 08:28:44 crc kubenswrapper[4691]: I1124 08:28:44.298102 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 08:28:45 crc kubenswrapper[4691]: I1124 08:28:45.309591 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnsrx" event={"ID":"112ff422-42b5-4f00-8ab0-c59fd2d1c001","Type":"ContainerStarted","Data":"4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69"} Nov 24 08:28:46 crc kubenswrapper[4691]: I1124 08:28:46.322694 4691 generic.go:334] "Generic (PLEG): container finished" podID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerID="4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69" exitCode=0 Nov 24 08:28:46 crc kubenswrapper[4691]: I1124 08:28:46.322755 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnsrx" event={"ID":"112ff422-42b5-4f00-8ab0-c59fd2d1c001","Type":"ContainerDied","Data":"4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69"} Nov 24 08:28:48 crc kubenswrapper[4691]: I1124 08:28:48.347169 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnsrx" event={"ID":"112ff422-42b5-4f00-8ab0-c59fd2d1c001","Type":"ContainerStarted","Data":"6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15"} Nov 24 08:28:48 crc kubenswrapper[4691]: I1124 08:28:48.372348 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nnsrx" podStartSLOduration=2.91686107 podStartE2EDuration="5.37232401s" podCreationTimestamp="2025-11-24 08:28:43 +0000 UTC" firstStartedPulling="2025-11-24 08:28:44.297858343 +0000 UTC m=+1886.296807592" lastFinishedPulling="2025-11-24 08:28:46.753321283 +0000 UTC m=+1888.752270532" observedRunningTime="2025-11-24 08:28:48.364661352 +0000 UTC m=+1890.363610611" 
watchObservedRunningTime="2025-11-24 08:28:48.37232401 +0000 UTC m=+1890.371273269" Nov 24 08:28:53 crc kubenswrapper[4691]: I1124 08:28:53.566157 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:53 crc kubenswrapper[4691]: I1124 08:28:53.566498 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:28:54 crc kubenswrapper[4691]: I1124 08:28:54.406354 4691 generic.go:334] "Generic (PLEG): container finished" podID="7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0" containerID="b373c57b96bb80dca6c05b208530141e409048d72274530fba782095b6253dac" exitCode=0 Nov 24 08:28:54 crc kubenswrapper[4691]: I1124 08:28:54.406410 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" event={"ID":"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0","Type":"ContainerDied","Data":"b373c57b96bb80dca6c05b208530141e409048d72274530fba782095b6253dac"} Nov 24 08:28:54 crc kubenswrapper[4691]: I1124 08:28:54.622273 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nnsrx" podUID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerName="registry-server" probeResult="failure" output=< Nov 24 08:28:54 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 08:28:54 crc kubenswrapper[4691]: > Nov 24 08:28:55 crc kubenswrapper[4691]: I1124 08:28:55.846471 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:28:55 crc kubenswrapper[4691]: I1124 08:28:55.997175 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-inventory\") pod \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " Nov 24 08:28:55 crc kubenswrapper[4691]: I1124 08:28:55.998525 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovn-combined-ca-bundle\") pod \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " Nov 24 08:28:55 crc kubenswrapper[4691]: I1124 08:28:55.999066 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ssh-key\") pod \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " Nov 24 08:28:55 crc kubenswrapper[4691]: I1124 08:28:55.999430 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovncontroller-config-0\") pod \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " Nov 24 08:28:55 crc kubenswrapper[4691]: I1124 08:28:55.999921 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6c8b6\" (UniqueName: \"kubernetes.io/projected/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-kube-api-access-6c8b6\") pod \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\" (UID: \"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0\") " Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.004881 4691 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-kube-api-access-6c8b6" (OuterVolumeSpecName: "kube-api-access-6c8b6") pod "7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0" (UID: "7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0"). InnerVolumeSpecName "kube-api-access-6c8b6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.005711 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0" (UID: "7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.028117 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-inventory" (OuterVolumeSpecName: "inventory") pod "7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0" (UID: "7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.032598 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0" (UID: "7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.034768 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0" (UID: "7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.102211 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.102266 4691 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.102279 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6c8b6\" (UniqueName: \"kubernetes.io/projected/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-kube-api-access-6c8b6\") on node \"crc\" DevicePath \"\"" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.102289 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.102301 4691 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.454108 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" event={"ID":"7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0","Type":"ContainerDied","Data":"346c4a30fefd547651fe75fd4600df1850b5b572c690a55890555096d56585cc"} Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.454151 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="346c4a30fefd547651fe75fd4600df1850b5b572c690a55890555096d56585cc" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.454207 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-zqkwb" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.540329 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2"] Nov 24 08:28:56 crc kubenswrapper[4691]: E1124 08:28:56.540999 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.541085 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.541347 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.543576 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.548010 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.548037 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.548503 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.556680 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.557207 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.557351 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.561086 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2"] Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.611404 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlzfl\" (UniqueName: \"kubernetes.io/projected/42d1ff5d-430e-489b-9015-b8a7ad572893-kube-api-access-jlzfl\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.611731 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.611854 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.611927 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.611966 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.612006 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.713837 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlzfl\" (UniqueName: \"kubernetes.io/projected/42d1ff5d-430e-489b-9015-b8a7ad572893-kube-api-access-jlzfl\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.713946 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.713989 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.714017 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.714040 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.714705 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.718253 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.718800 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.718968 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.719129 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.722479 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.729646 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlzfl\" (UniqueName: \"kubernetes.io/projected/42d1ff5d-430e-489b-9015-b8a7ad572893-kube-api-access-jlzfl\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:56 crc kubenswrapper[4691]: I1124 08:28:56.885152 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:28:57 crc kubenswrapper[4691]: I1124 08:28:57.461846 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2"] Nov 24 08:28:57 crc kubenswrapper[4691]: I1124 08:28:57.465200 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" event={"ID":"42d1ff5d-430e-489b-9015-b8a7ad572893","Type":"ContainerStarted","Data":"58ba6fbf32ec2f694c542ac817c32d9ab01a898fa68679d5ff7904deea2b26bb"} Nov 24 08:28:58 crc kubenswrapper[4691]: I1124 08:28:58.475572 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" event={"ID":"42d1ff5d-430e-489b-9015-b8a7ad572893","Type":"ContainerStarted","Data":"19678bf0f69c7905099938133195c9f237aa6c85277fdf7a828b9ae287df2e4b"} Nov 24 08:28:58 crc kubenswrapper[4691]: I1124 08:28:58.495763 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" podStartSLOduration=2.034052108 podStartE2EDuration="2.49574486s" podCreationTimestamp="2025-11-24 08:28:56 +0000 UTC" firstStartedPulling="2025-11-24 08:28:57.461392067 +0000 UTC m=+1899.460341316" lastFinishedPulling="2025-11-24 08:28:57.923084809 +0000 UTC m=+1899.922034068" observedRunningTime="2025-11-24 08:28:58.491414887 +0000 UTC m=+1900.490364136" watchObservedRunningTime="2025-11-24 08:28:58.49574486 +0000 UTC m=+1900.494694109" Nov 24 08:29:04 crc kubenswrapper[4691]: I1124 08:29:04.632887 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nnsrx" podUID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerName="registry-server" probeResult="failure" output=< Nov 24 08:29:04 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 08:29:04 crc kubenswrapper[4691]: > Nov 24 08:29:13 crc kubenswrapper[4691]: I1124 08:29:13.657142 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:29:13 crc kubenswrapper[4691]: I1124 08:29:13.748186 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:29:14 crc kubenswrapper[4691]: I1124 08:29:14.432723 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nnsrx"] Nov 24 08:29:15 crc kubenswrapper[4691]: I1124 08:29:15.643322 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nnsrx" podUID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerName="registry-server" containerID="cri-o://6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15" gracePeriod=2 Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.177762 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.321286 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-catalog-content\") pod \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.321359 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlqxw\" (UniqueName: \"kubernetes.io/projected/112ff422-42b5-4f00-8ab0-c59fd2d1c001-kube-api-access-jlqxw\") pod \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.321585 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-utilities\") pod \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\" (UID: \"112ff422-42b5-4f00-8ab0-c59fd2d1c001\") " Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.322719 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-utilities" (OuterVolumeSpecName: "utilities") pod "112ff422-42b5-4f00-8ab0-c59fd2d1c001" (UID: "112ff422-42b5-4f00-8ab0-c59fd2d1c001"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.330083 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/112ff422-42b5-4f00-8ab0-c59fd2d1c001-kube-api-access-jlqxw" (OuterVolumeSpecName: "kube-api-access-jlqxw") pod "112ff422-42b5-4f00-8ab0-c59fd2d1c001" (UID: "112ff422-42b5-4f00-8ab0-c59fd2d1c001"). InnerVolumeSpecName "kube-api-access-jlqxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.423989 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.424044 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlqxw\" (UniqueName: \"kubernetes.io/projected/112ff422-42b5-4f00-8ab0-c59fd2d1c001-kube-api-access-jlqxw\") on node \"crc\" DevicePath \"\"" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.438402 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "112ff422-42b5-4f00-8ab0-c59fd2d1c001" (UID: "112ff422-42b5-4f00-8ab0-c59fd2d1c001"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.526643 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/112ff422-42b5-4f00-8ab0-c59fd2d1c001-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.655166 4691 generic.go:334] "Generic (PLEG): container finished" podID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerID="6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15" exitCode=0 Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.655214 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nnsrx" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.655224 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnsrx" event={"ID":"112ff422-42b5-4f00-8ab0-c59fd2d1c001","Type":"ContainerDied","Data":"6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15"} Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.655252 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnsrx" event={"ID":"112ff422-42b5-4f00-8ab0-c59fd2d1c001","Type":"ContainerDied","Data":"9013eea361f674fafd5c61aee528562daac3bd6e879349dd16d6d4b7dfd18eaa"} Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.655275 4691 scope.go:117] "RemoveContainer" containerID="6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.680479 4691 scope.go:117] "RemoveContainer" containerID="4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.689248 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nnsrx"] Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.696440 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nnsrx"] Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.726659 4691 scope.go:117] "RemoveContainer" containerID="accda20ef52ed82c1965344f8aa89201a2ce11160fe41d7a71c241c8a8410f69" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.752601 4691 scope.go:117] "RemoveContainer" containerID="6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15" Nov 24 08:29:16 crc kubenswrapper[4691]: E1124 08:29:16.753011 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15\": container with ID starting with 6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15 not found: ID does not exist" containerID="6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.753053 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15"} err="failed to get container status \"6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15\": rpc error: code = NotFound desc = could not find container \"6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15\": container with ID starting with 6089cc113a1217f825332e3d421ecde3708e7904298a8f2739f4ac7df1cdeb15 not found: ID does not exist" Nov 24 08:29:16 crc 
kubenswrapper[4691]: I1124 08:29:16.753078 4691 scope.go:117] "RemoveContainer" containerID="4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69" Nov 24 08:29:16 crc kubenswrapper[4691]: E1124 08:29:16.753381 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69\": container with ID starting with 4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69 not found: ID does not exist" containerID="4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.753412 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69"} err="failed to get container status \"4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69\": rpc error: code = NotFound desc = could not find container \"4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69\": container with ID starting with 4ee3a0ff3451b6f0d9d3b07312a1f372564a2ce0e73a9a1e69e14b1f96f2be69 not found: ID does not exist" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.753430 4691 scope.go:117] "RemoveContainer" containerID="accda20ef52ed82c1965344f8aa89201a2ce11160fe41d7a71c241c8a8410f69" Nov 24 08:29:16 crc kubenswrapper[4691]: E1124 08:29:16.753663 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"accda20ef52ed82c1965344f8aa89201a2ce11160fe41d7a71c241c8a8410f69\": container with ID starting with accda20ef52ed82c1965344f8aa89201a2ce11160fe41d7a71c241c8a8410f69 not found: ID does not exist" containerID="accda20ef52ed82c1965344f8aa89201a2ce11160fe41d7a71c241c8a8410f69" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.753688 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"accda20ef52ed82c1965344f8aa89201a2ce11160fe41d7a71c241c8a8410f69"} err="failed to get container status \"accda20ef52ed82c1965344f8aa89201a2ce11160fe41d7a71c241c8a8410f69\": rpc error: code = NotFound desc = could not find container \"accda20ef52ed82c1965344f8aa89201a2ce11160fe41d7a71c241c8a8410f69\": container with ID starting with accda20ef52ed82c1965344f8aa89201a2ce11160fe41d7a71c241c8a8410f69 not found: ID does not exist" Nov 24 08:29:16 crc kubenswrapper[4691]: I1124 08:29:16.771131 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" path="/var/lib/kubelet/pods/112ff422-42b5-4f00-8ab0-c59fd2d1c001/volumes" Nov 24 08:29:23 crc kubenswrapper[4691]: I1124 08:29:23.273106 4691 scope.go:117] "RemoveContainer" containerID="a429b06d2e92dccf6756fb059dcd67d7cc38ecf811814143edd4b457875eddbd" Nov 24 08:29:23 crc kubenswrapper[4691]: I1124 08:29:23.301877 4691 scope.go:117] "RemoveContainer" containerID="28c70d8cfdd7ff92d8f1f59a40848f1c4b5ca15a2f392647d65e1199008eaaff" Nov 24 08:29:23 crc kubenswrapper[4691]: I1124 08:29:23.338630 4691 scope.go:117] "RemoveContainer" containerID="0e852d0c87c8218aa1301677763cabc7e776fc1aee28f5879e607ba469a4ce79" Nov 24 08:29:49 crc kubenswrapper[4691]: I1124 08:29:49.040600 4691 generic.go:334] "Generic (PLEG): container finished" podID="42d1ff5d-430e-489b-9015-b8a7ad572893" containerID="19678bf0f69c7905099938133195c9f237aa6c85277fdf7a828b9ae287df2e4b" exitCode=0 Nov 24 08:29:49 crc kubenswrapper[4691]: I1124 
08:29:49.040728 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" event={"ID":"42d1ff5d-430e-489b-9015-b8a7ad572893","Type":"ContainerDied","Data":"19678bf0f69c7905099938133195c9f237aa6c85277fdf7a828b9ae287df2e4b"} Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.491177 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.600692 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-ssh-key\") pod \"42d1ff5d-430e-489b-9015-b8a7ad572893\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.600758 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-metadata-combined-ca-bundle\") pod \"42d1ff5d-430e-489b-9015-b8a7ad572893\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.600900 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlzfl\" (UniqueName: \"kubernetes.io/projected/42d1ff5d-430e-489b-9015-b8a7ad572893-kube-api-access-jlzfl\") pod \"42d1ff5d-430e-489b-9015-b8a7ad572893\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.600961 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-nova-metadata-neutron-config-0\") pod \"42d1ff5d-430e-489b-9015-b8a7ad572893\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.601009 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-inventory\") pod \"42d1ff5d-430e-489b-9015-b8a7ad572893\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.601039 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-ovn-metadata-agent-neutron-config-0\") pod \"42d1ff5d-430e-489b-9015-b8a7ad572893\" (UID: \"42d1ff5d-430e-489b-9015-b8a7ad572893\") " Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.606240 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42d1ff5d-430e-489b-9015-b8a7ad572893-kube-api-access-jlzfl" (OuterVolumeSpecName: "kube-api-access-jlzfl") pod "42d1ff5d-430e-489b-9015-b8a7ad572893" (UID: "42d1ff5d-430e-489b-9015-b8a7ad572893"). InnerVolumeSpecName "kube-api-access-jlzfl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.610667 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "42d1ff5d-430e-489b-9015-b8a7ad572893" (UID: "42d1ff5d-430e-489b-9015-b8a7ad572893"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.630532 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "42d1ff5d-430e-489b-9015-b8a7ad572893" (UID: "42d1ff5d-430e-489b-9015-b8a7ad572893"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.632706 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-inventory" (OuterVolumeSpecName: "inventory") pod "42d1ff5d-430e-489b-9015-b8a7ad572893" (UID: "42d1ff5d-430e-489b-9015-b8a7ad572893"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.634775 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "42d1ff5d-430e-489b-9015-b8a7ad572893" (UID: "42d1ff5d-430e-489b-9015-b8a7ad572893"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.639672 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "42d1ff5d-430e-489b-9015-b8a7ad572893" (UID: "42d1ff5d-430e-489b-9015-b8a7ad572893"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.703483 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.703523 4691 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.703541 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.703559 4691 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.703575 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlzfl\" (UniqueName: \"kubernetes.io/projected/42d1ff5d-430e-489b-9015-b8a7ad572893-kube-api-access-jlzfl\") on node \"crc\" DevicePath \"\"" Nov 24 08:29:50 crc kubenswrapper[4691]: I1124 08:29:50.703591 4691 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/42d1ff5d-430e-489b-9015-b8a7ad572893-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.064246 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" event={"ID":"42d1ff5d-430e-489b-9015-b8a7ad572893","Type":"ContainerDied","Data":"58ba6fbf32ec2f694c542ac817c32d9ab01a898fa68679d5ff7904deea2b26bb"} Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.064286 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.064310 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58ba6fbf32ec2f694c542ac817c32d9ab01a898fa68679d5ff7904deea2b26bb" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.197592 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm"] Nov 24 08:29:51 crc kubenswrapper[4691]: E1124 08:29:51.198620 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerName="registry-server" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.198738 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerName="registry-server" Nov 24 08:29:51 crc kubenswrapper[4691]: E1124 08:29:51.198856 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerName="extract-utilities" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.198928 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerName="extract-utilities" Nov 24 08:29:51 crc kubenswrapper[4691]: E1124 08:29:51.199031 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerName="extract-content" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.199101 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerName="extract-content" Nov 24 08:29:51 crc kubenswrapper[4691]: E1124 08:29:51.199173 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42d1ff5d-430e-489b-9015-b8a7ad572893" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.199251 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="42d1ff5d-430e-489b-9015-b8a7ad572893" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.199583 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="42d1ff5d-430e-489b-9015-b8a7ad572893" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.199737 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="112ff422-42b5-4f00-8ab0-c59fd2d1c001" containerName="registry-server" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.200970 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.206569 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.206893 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.206894 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.207649 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.207862 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.227124 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm"] Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.318230 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.318546 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.318779 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.318989 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7d2x\" (UniqueName: \"kubernetes.io/projected/e9953558-8b56-432e-bde8-c07beaa047c0-kube-api-access-d7d2x\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.319114 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.421770 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-d7d2x\" (UniqueName: \"kubernetes.io/projected/e9953558-8b56-432e-bde8-c07beaa047c0-kube-api-access-d7d2x\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.421955 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.422043 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.422174 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.422242 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.428312 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.429785 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.429914 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.430757 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-combined-ca-bundle\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.445083 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7d2x\" (UniqueName: \"kubernetes.io/projected/e9953558-8b56-432e-bde8-c07beaa047c0-kube-api-access-d7d2x\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:51 crc kubenswrapper[4691]: I1124 08:29:51.542623 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" Nov 24 08:29:52 crc kubenswrapper[4691]: I1124 08:29:52.118880 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm"] Nov 24 08:29:53 crc kubenswrapper[4691]: I1124 08:29:53.094288 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" event={"ID":"e9953558-8b56-432e-bde8-c07beaa047c0","Type":"ContainerStarted","Data":"44312e0a6a2a6ce1137afb59cebbc5ad60605e1b469f5dbb639ea6fc373a769d"} Nov 24 08:29:53 crc kubenswrapper[4691]: I1124 08:29:53.094367 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" event={"ID":"e9953558-8b56-432e-bde8-c07beaa047c0","Type":"ContainerStarted","Data":"3ca259ce79a55dc99d7b178b3de11373381e37161bb8a22ffc45140fb7d95280"} Nov 24 08:29:53 crc kubenswrapper[4691]: I1124 08:29:53.115756 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" podStartSLOduration=1.673947343 podStartE2EDuration="2.11573753s" podCreationTimestamp="2025-11-24 08:29:51 +0000 UTC" firstStartedPulling="2025-11-24 08:29:52.125103259 +0000 UTC m=+1954.124052538" lastFinishedPulling="2025-11-24 08:29:52.566893456 +0000 UTC m=+1954.565842725" observedRunningTime="2025-11-24 08:29:53.114999959 +0000 UTC m=+1955.113949218" watchObservedRunningTime="2025-11-24 08:29:53.11573753 +0000 UTC m=+1955.114686779" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.141778 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx"] Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.145784 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.148729 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.149335 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.155192 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx"] Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.222881 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8dac9fc-69b6-4899-a8fd-7aa75e002329-config-volume\") pod \"collect-profiles-29399550-sh8qx\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.223083 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2crxf\" (UniqueName: \"kubernetes.io/projected/d8dac9fc-69b6-4899-a8fd-7aa75e002329-kube-api-access-2crxf\") pod \"collect-profiles-29399550-sh8qx\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.223326 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8dac9fc-69b6-4899-a8fd-7aa75e002329-secret-volume\") pod \"collect-profiles-29399550-sh8qx\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.325001 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8dac9fc-69b6-4899-a8fd-7aa75e002329-secret-volume\") pod \"collect-profiles-29399550-sh8qx\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.325136 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8dac9fc-69b6-4899-a8fd-7aa75e002329-config-volume\") pod \"collect-profiles-29399550-sh8qx\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.325199 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2crxf\" (UniqueName: \"kubernetes.io/projected/d8dac9fc-69b6-4899-a8fd-7aa75e002329-kube-api-access-2crxf\") pod \"collect-profiles-29399550-sh8qx\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.326288 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8dac9fc-69b6-4899-a8fd-7aa75e002329-config-volume\") pod 
\"collect-profiles-29399550-sh8qx\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.334764 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8dac9fc-69b6-4899-a8fd-7aa75e002329-secret-volume\") pod \"collect-profiles-29399550-sh8qx\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.341128 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2crxf\" (UniqueName: \"kubernetes.io/projected/d8dac9fc-69b6-4899-a8fd-7aa75e002329-kube-api-access-2crxf\") pod \"collect-profiles-29399550-sh8qx\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.472747 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:00 crc kubenswrapper[4691]: I1124 08:30:00.936994 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx"] Nov 24 08:30:01 crc kubenswrapper[4691]: I1124 08:30:01.172125 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" event={"ID":"d8dac9fc-69b6-4899-a8fd-7aa75e002329","Type":"ContainerStarted","Data":"d561ae0646a0431ec713024573bcdd28dc487e226753c4ef6fe075f36b63b97e"} Nov 24 08:30:01 crc kubenswrapper[4691]: I1124 08:30:01.172535 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" event={"ID":"d8dac9fc-69b6-4899-a8fd-7aa75e002329","Type":"ContainerStarted","Data":"0f7307c3c291f5941cbdac239d29a3c02522c84532e3885670c74808616cffb8"} Nov 24 08:30:01 crc kubenswrapper[4691]: I1124 08:30:01.196861 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" podStartSLOduration=1.196834152 podStartE2EDuration="1.196834152s" podCreationTimestamp="2025-11-24 08:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:30:01.189126013 +0000 UTC m=+1963.188075272" watchObservedRunningTime="2025-11-24 08:30:01.196834152 +0000 UTC m=+1963.195783411" Nov 24 08:30:02 crc kubenswrapper[4691]: I1124 08:30:02.193134 4691 generic.go:334] "Generic (PLEG): container finished" podID="d8dac9fc-69b6-4899-a8fd-7aa75e002329" containerID="d561ae0646a0431ec713024573bcdd28dc487e226753c4ef6fe075f36b63b97e" exitCode=0 Nov 24 08:30:02 crc kubenswrapper[4691]: I1124 08:30:02.193292 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" event={"ID":"d8dac9fc-69b6-4899-a8fd-7aa75e002329","Type":"ContainerDied","Data":"d561ae0646a0431ec713024573bcdd28dc487e226753c4ef6fe075f36b63b97e"} Nov 24 08:30:03 crc kubenswrapper[4691]: I1124 08:30:03.540360 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:03 crc kubenswrapper[4691]: I1124 08:30:03.682627 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8dac9fc-69b6-4899-a8fd-7aa75e002329-secret-volume\") pod \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " Nov 24 08:30:03 crc kubenswrapper[4691]: I1124 08:30:03.683064 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8dac9fc-69b6-4899-a8fd-7aa75e002329-config-volume\") pod \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " Nov 24 08:30:03 crc kubenswrapper[4691]: I1124 08:30:03.683583 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8dac9fc-69b6-4899-a8fd-7aa75e002329-config-volume" (OuterVolumeSpecName: "config-volume") pod "d8dac9fc-69b6-4899-a8fd-7aa75e002329" (UID: "d8dac9fc-69b6-4899-a8fd-7aa75e002329"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:30:03 crc kubenswrapper[4691]: I1124 08:30:03.683827 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2crxf\" (UniqueName: \"kubernetes.io/projected/d8dac9fc-69b6-4899-a8fd-7aa75e002329-kube-api-access-2crxf\") pod \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\" (UID: \"d8dac9fc-69b6-4899-a8fd-7aa75e002329\") " Nov 24 08:30:03 crc kubenswrapper[4691]: I1124 08:30:03.685064 4691 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8dac9fc-69b6-4899-a8fd-7aa75e002329-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 08:30:03 crc kubenswrapper[4691]: I1124 08:30:03.693260 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8dac9fc-69b6-4899-a8fd-7aa75e002329-kube-api-access-2crxf" (OuterVolumeSpecName: "kube-api-access-2crxf") pod "d8dac9fc-69b6-4899-a8fd-7aa75e002329" (UID: "d8dac9fc-69b6-4899-a8fd-7aa75e002329"). InnerVolumeSpecName "kube-api-access-2crxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:30:03 crc kubenswrapper[4691]: I1124 08:30:03.699585 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8dac9fc-69b6-4899-a8fd-7aa75e002329-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d8dac9fc-69b6-4899-a8fd-7aa75e002329" (UID: "d8dac9fc-69b6-4899-a8fd-7aa75e002329"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:30:03 crc kubenswrapper[4691]: I1124 08:30:03.786746 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2crxf\" (UniqueName: \"kubernetes.io/projected/d8dac9fc-69b6-4899-a8fd-7aa75e002329-kube-api-access-2crxf\") on node \"crc\" DevicePath \"\"" Nov 24 08:30:03 crc kubenswrapper[4691]: I1124 08:30:03.786784 4691 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8dac9fc-69b6-4899-a8fd-7aa75e002329-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 08:30:04 crc kubenswrapper[4691]: I1124 08:30:04.215432 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" event={"ID":"d8dac9fc-69b6-4899-a8fd-7aa75e002329","Type":"ContainerDied","Data":"0f7307c3c291f5941cbdac239d29a3c02522c84532e3885670c74808616cffb8"} Nov 24 08:30:04 crc kubenswrapper[4691]: I1124 08:30:04.215513 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f7307c3c291f5941cbdac239d29a3c02522c84532e3885670c74808616cffb8" Nov 24 08:30:04 crc kubenswrapper[4691]: I1124 08:30:04.215608 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx" Nov 24 08:30:21 crc kubenswrapper[4691]: I1124 08:30:21.088948 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:30:21 crc kubenswrapper[4691]: I1124 08:30:21.089491 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:30:51 crc kubenswrapper[4691]: I1124 08:30:51.089163 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:30:51 crc kubenswrapper[4691]: I1124 08:30:51.089828 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:31:21 crc kubenswrapper[4691]: I1124 08:31:21.089529 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:31:21 crc kubenswrapper[4691]: I1124 08:31:21.090168 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:31:21 crc kubenswrapper[4691]: I1124 08:31:21.090216 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:31:21 crc kubenswrapper[4691]: I1124 08:31:21.090956 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"70419c902bba2a0ee14c9bd0fd9567bf6662a2111ab103dd012ed4d7572a55ae"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 08:31:21 crc kubenswrapper[4691]: I1124 08:31:21.091013 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://70419c902bba2a0ee14c9bd0fd9567bf6662a2111ab103dd012ed4d7572a55ae" gracePeriod=600 Nov 24 08:31:22 crc kubenswrapper[4691]: I1124 08:31:22.017555 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="70419c902bba2a0ee14c9bd0fd9567bf6662a2111ab103dd012ed4d7572a55ae" exitCode=0 Nov 24 08:31:22 crc kubenswrapper[4691]: I1124 08:31:22.017601 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"70419c902bba2a0ee14c9bd0fd9567bf6662a2111ab103dd012ed4d7572a55ae"} Nov 24 08:31:22 crc kubenswrapper[4691]: I1124 08:31:22.018178 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20"} Nov 24 08:31:22 crc kubenswrapper[4691]: I1124 08:31:22.018215 4691 scope.go:117] "RemoveContainer" containerID="baafdc5484696f8a6ed3299009fbc07f5ebbc71075e6742f1979ca5e2e2084cf" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.099534 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pc4w8"] Nov 24 08:31:51 crc kubenswrapper[4691]: E1124 08:31:51.100756 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8dac9fc-69b6-4899-a8fd-7aa75e002329" containerName="collect-profiles" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.100774 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8dac9fc-69b6-4899-a8fd-7aa75e002329" containerName="collect-profiles" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.100997 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8dac9fc-69b6-4899-a8fd-7aa75e002329" containerName="collect-profiles" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.108801 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.121263 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pc4w8"] Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.196224 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-catalog-content\") pod \"community-operators-pc4w8\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.196310 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skdvn\" (UniqueName: \"kubernetes.io/projected/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-kube-api-access-skdvn\") pod \"community-operators-pc4w8\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.196425 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-utilities\") pod \"community-operators-pc4w8\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.298540 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-utilities\") pod \"community-operators-pc4w8\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.298731 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-catalog-content\") pod \"community-operators-pc4w8\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.298865 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skdvn\" (UniqueName: \"kubernetes.io/projected/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-kube-api-access-skdvn\") pod \"community-operators-pc4w8\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.299148 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-utilities\") pod \"community-operators-pc4w8\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.299191 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-catalog-content\") pod \"community-operators-pc4w8\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.321501 4691 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-skdvn\" (UniqueName: \"kubernetes.io/projected/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-kube-api-access-skdvn\") pod \"community-operators-pc4w8\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.443535 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:31:51 crc kubenswrapper[4691]: I1124 08:31:51.989058 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pc4w8"] Nov 24 08:31:51 crc kubenswrapper[4691]: W1124 08:31:51.996683 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6802515e_54e3_4a7f_a4ce_ada8a63c6fa3.slice/crio-8af0d3acc74d6eb02dfd3872eaca5037a60602fe4a7f580a6dc443c3449dfb55 WatchSource:0}: Error finding container 8af0d3acc74d6eb02dfd3872eaca5037a60602fe4a7f580a6dc443c3449dfb55: Status 404 returned error can't find the container with id 8af0d3acc74d6eb02dfd3872eaca5037a60602fe4a7f580a6dc443c3449dfb55 Nov 24 08:31:52 crc kubenswrapper[4691]: I1124 08:31:52.312030 4691 generic.go:334] "Generic (PLEG): container finished" podID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" containerID="9ec6fe87c6468fe95039fe00e0682bdd17acfe5f2a1c664b7695652ce7c6138b" exitCode=0 Nov 24 08:31:52 crc kubenswrapper[4691]: I1124 08:31:52.312262 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc4w8" event={"ID":"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3","Type":"ContainerDied","Data":"9ec6fe87c6468fe95039fe00e0682bdd17acfe5f2a1c664b7695652ce7c6138b"} Nov 24 08:31:52 crc kubenswrapper[4691]: I1124 08:31:52.312536 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc4w8" event={"ID":"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3","Type":"ContainerStarted","Data":"8af0d3acc74d6eb02dfd3872eaca5037a60602fe4a7f580a6dc443c3449dfb55"} Nov 24 08:31:53 crc kubenswrapper[4691]: I1124 08:31:53.356090 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc4w8" event={"ID":"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3","Type":"ContainerStarted","Data":"134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad"} Nov 24 08:31:54 crc kubenswrapper[4691]: I1124 08:31:54.368650 4691 generic.go:334] "Generic (PLEG): container finished" podID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" containerID="134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad" exitCode=0 Nov 24 08:31:54 crc kubenswrapper[4691]: I1124 08:31:54.368741 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc4w8" event={"ID":"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3","Type":"ContainerDied","Data":"134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad"} Nov 24 08:31:55 crc kubenswrapper[4691]: I1124 08:31:55.381281 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc4w8" event={"ID":"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3","Type":"ContainerStarted","Data":"e66f7fd51832547c6aaace976a094696f7d8f7d8b704ab4f64caf9e7475f9880"} Nov 24 08:31:55 crc kubenswrapper[4691]: I1124 08:31:55.412782 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pc4w8" 
podStartSLOduration=1.935314325 podStartE2EDuration="4.412749148s" podCreationTimestamp="2025-11-24 08:31:51 +0000 UTC" firstStartedPulling="2025-11-24 08:31:52.314357608 +0000 UTC m=+2074.313306857" lastFinishedPulling="2025-11-24 08:31:54.791792421 +0000 UTC m=+2076.790741680" observedRunningTime="2025-11-24 08:31:55.405912724 +0000 UTC m=+2077.404861983" watchObservedRunningTime="2025-11-24 08:31:55.412749148 +0000 UTC m=+2077.411698397" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.050087 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fcd4b"] Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.052573 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.070219 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fcd4b"] Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.135668 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-catalog-content\") pod \"certified-operators-fcd4b\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.136041 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrwzq\" (UniqueName: \"kubernetes.io/projected/16befb19-eef7-4a4d-8e1e-00c98b5c375c-kube-api-access-nrwzq\") pod \"certified-operators-fcd4b\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.136067 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-utilities\") pod \"certified-operators-fcd4b\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.238689 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrwzq\" (UniqueName: \"kubernetes.io/projected/16befb19-eef7-4a4d-8e1e-00c98b5c375c-kube-api-access-nrwzq\") pod \"certified-operators-fcd4b\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.238762 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-utilities\") pod \"certified-operators-fcd4b\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.239041 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-catalog-content\") pod \"certified-operators-fcd4b\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.239893 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-catalog-content\") pod \"certified-operators-fcd4b\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.240214 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-utilities\") pod \"certified-operators-fcd4b\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.261493 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrwzq\" (UniqueName: \"kubernetes.io/projected/16befb19-eef7-4a4d-8e1e-00c98b5c375c-kube-api-access-nrwzq\") pod \"certified-operators-fcd4b\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.375036 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:31:58 crc kubenswrapper[4691]: I1124 08:31:58.901929 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fcd4b"] Nov 24 08:31:58 crc kubenswrapper[4691]: W1124 08:31:58.905880 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16befb19_eef7_4a4d_8e1e_00c98b5c375c.slice/crio-8b58d4da80a917524f40aa88ff1d231e8568dac9bf3f0d1f32e08eaf9be97f5a WatchSource:0}: Error finding container 8b58d4da80a917524f40aa88ff1d231e8568dac9bf3f0d1f32e08eaf9be97f5a: Status 404 returned error can't find the container with id 8b58d4da80a917524f40aa88ff1d231e8568dac9bf3f0d1f32e08eaf9be97f5a Nov 24 08:31:59 crc kubenswrapper[4691]: I1124 08:31:59.436077 4691 generic.go:334] "Generic (PLEG): container finished" podID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" containerID="4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2" exitCode=0 Nov 24 08:31:59 crc kubenswrapper[4691]: I1124 08:31:59.436187 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcd4b" event={"ID":"16befb19-eef7-4a4d-8e1e-00c98b5c375c","Type":"ContainerDied","Data":"4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2"} Nov 24 08:31:59 crc kubenswrapper[4691]: I1124 08:31:59.436525 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcd4b" event={"ID":"16befb19-eef7-4a4d-8e1e-00c98b5c375c","Type":"ContainerStarted","Data":"8b58d4da80a917524f40aa88ff1d231e8568dac9bf3f0d1f32e08eaf9be97f5a"} Nov 24 08:32:00 crc kubenswrapper[4691]: I1124 08:32:00.447154 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcd4b" event={"ID":"16befb19-eef7-4a4d-8e1e-00c98b5c375c","Type":"ContainerStarted","Data":"e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1"} Nov 24 08:32:01 crc kubenswrapper[4691]: I1124 08:32:01.443692 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:32:01 crc kubenswrapper[4691]: I1124 08:32:01.444068 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pc4w8" Nov 24 
08:32:01 crc kubenswrapper[4691]: I1124 08:32:01.461591 4691 generic.go:334] "Generic (PLEG): container finished" podID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" containerID="e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1" exitCode=0 Nov 24 08:32:01 crc kubenswrapper[4691]: I1124 08:32:01.461642 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcd4b" event={"ID":"16befb19-eef7-4a4d-8e1e-00c98b5c375c","Type":"ContainerDied","Data":"e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1"} Nov 24 08:32:01 crc kubenswrapper[4691]: I1124 08:32:01.521530 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:32:01 crc kubenswrapper[4691]: I1124 08:32:01.574424 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:32:02 crc kubenswrapper[4691]: I1124 08:32:02.475547 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcd4b" event={"ID":"16befb19-eef7-4a4d-8e1e-00c98b5c375c","Type":"ContainerStarted","Data":"cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d"} Nov 24 08:32:02 crc kubenswrapper[4691]: I1124 08:32:02.500595 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fcd4b" podStartSLOduration=2.014107854 podStartE2EDuration="4.500579935s" podCreationTimestamp="2025-11-24 08:31:58 +0000 UTC" firstStartedPulling="2025-11-24 08:31:59.437753567 +0000 UTC m=+2081.436702816" lastFinishedPulling="2025-11-24 08:32:01.924225648 +0000 UTC m=+2083.923174897" observedRunningTime="2025-11-24 08:32:02.49618876 +0000 UTC m=+2084.495138019" watchObservedRunningTime="2025-11-24 08:32:02.500579935 +0000 UTC m=+2084.499529184" Nov 24 08:32:06 crc kubenswrapper[4691]: I1124 08:32:06.838669 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pc4w8"] Nov 24 08:32:06 crc kubenswrapper[4691]: I1124 08:32:06.839876 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pc4w8" podUID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" containerName="registry-server" containerID="cri-o://e66f7fd51832547c6aaace976a094696f7d8f7d8b704ab4f64caf9e7475f9880" gracePeriod=2 Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.319981 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.442534 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skdvn\" (UniqueName: \"kubernetes.io/projected/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-kube-api-access-skdvn\") pod \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.442745 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-catalog-content\") pod \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.442779 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-utilities\") pod \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\" (UID: \"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3\") " Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.443986 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-utilities" (OuterVolumeSpecName: "utilities") pod "6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" (UID: "6802515e-54e3-4a7f-a4ce-ada8a63c6fa3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.455273 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-kube-api-access-skdvn" (OuterVolumeSpecName: "kube-api-access-skdvn") pod "6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" (UID: "6802515e-54e3-4a7f-a4ce-ada8a63c6fa3"). InnerVolumeSpecName "kube-api-access-skdvn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.489696 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" (UID: "6802515e-54e3-4a7f-a4ce-ada8a63c6fa3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.531392 4691 generic.go:334] "Generic (PLEG): container finished" podID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" containerID="e66f7fd51832547c6aaace976a094696f7d8f7d8b704ab4f64caf9e7475f9880" exitCode=0 Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.531434 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc4w8" event={"ID":"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3","Type":"ContainerDied","Data":"e66f7fd51832547c6aaace976a094696f7d8f7d8b704ab4f64caf9e7475f9880"} Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.531480 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pc4w8" event={"ID":"6802515e-54e3-4a7f-a4ce-ada8a63c6fa3","Type":"ContainerDied","Data":"8af0d3acc74d6eb02dfd3872eaca5037a60602fe4a7f580a6dc443c3449dfb55"} Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.531497 4691 scope.go:117] "RemoveContainer" containerID="e66f7fd51832547c6aaace976a094696f7d8f7d8b704ab4f64caf9e7475f9880" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.531629 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pc4w8" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.545623 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skdvn\" (UniqueName: \"kubernetes.io/projected/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-kube-api-access-skdvn\") on node \"crc\" DevicePath \"\"" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.545660 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.545669 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.568862 4691 scope.go:117] "RemoveContainer" containerID="134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.579592 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pc4w8"] Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.588689 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pc4w8"] Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.597540 4691 scope.go:117] "RemoveContainer" containerID="9ec6fe87c6468fe95039fe00e0682bdd17acfe5f2a1c664b7695652ce7c6138b" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.642351 4691 scope.go:117] "RemoveContainer" containerID="e66f7fd51832547c6aaace976a094696f7d8f7d8b704ab4f64caf9e7475f9880" Nov 24 08:32:07 crc kubenswrapper[4691]: E1124 08:32:07.642802 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e66f7fd51832547c6aaace976a094696f7d8f7d8b704ab4f64caf9e7475f9880\": container with ID starting with e66f7fd51832547c6aaace976a094696f7d8f7d8b704ab4f64caf9e7475f9880 not found: ID does not exist" containerID="e66f7fd51832547c6aaace976a094696f7d8f7d8b704ab4f64caf9e7475f9880" Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.642841 
Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.642869 4691 scope.go:117] "RemoveContainer" containerID="134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad"
Nov 24 08:32:07 crc kubenswrapper[4691]: E1124 08:32:07.643113 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad\": container with ID starting with 134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad not found: ID does not exist" containerID="134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad"
Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.643140 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad"} err="failed to get container status \"134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad\": rpc error: code = NotFound desc = could not find container \"134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad\": container with ID starting with 134cfec48ec9e862c4093088196e777c248358952d48b0613371e362d55232ad not found: ID does not exist"
Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.643159 4691 scope.go:117] "RemoveContainer" containerID="9ec6fe87c6468fe95039fe00e0682bdd17acfe5f2a1c664b7695652ce7c6138b"
Nov 24 08:32:07 crc kubenswrapper[4691]: E1124 08:32:07.643376 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ec6fe87c6468fe95039fe00e0682bdd17acfe5f2a1c664b7695652ce7c6138b\": container with ID starting with 9ec6fe87c6468fe95039fe00e0682bdd17acfe5f2a1c664b7695652ce7c6138b not found: ID does not exist" containerID="9ec6fe87c6468fe95039fe00e0682bdd17acfe5f2a1c664b7695652ce7c6138b"
Nov 24 08:32:07 crc kubenswrapper[4691]: I1124 08:32:07.643401 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ec6fe87c6468fe95039fe00e0682bdd17acfe5f2a1c664b7695652ce7c6138b"} err="failed to get container status \"9ec6fe87c6468fe95039fe00e0682bdd17acfe5f2a1c664b7695652ce7c6138b\": rpc error: code = NotFound desc = could not find container \"9ec6fe87c6468fe95039fe00e0682bdd17acfe5f2a1c664b7695652ce7c6138b\": container with ID starting with 9ec6fe87c6468fe95039fe00e0682bdd17acfe5f2a1c664b7695652ce7c6138b not found: ID does not exist"
Nov 24 08:32:08 crc kubenswrapper[4691]: I1124 08:32:08.375833 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fcd4b"
Nov 24 08:32:08 crc kubenswrapper[4691]: I1124 08:32:08.375954 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fcd4b"
Nov 24 08:32:08 crc kubenswrapper[4691]: I1124 08:32:08.452081 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fcd4b"
pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:32:08 crc kubenswrapper[4691]: I1124 08:32:08.602884 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:32:08 crc kubenswrapper[4691]: I1124 08:32:08.783975 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" path="/var/lib/kubelet/pods/6802515e-54e3-4a7f-a4ce-ada8a63c6fa3/volumes" Nov 24 08:32:10 crc kubenswrapper[4691]: I1124 08:32:10.237560 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fcd4b"] Nov 24 08:32:10 crc kubenswrapper[4691]: I1124 08:32:10.564112 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fcd4b" podUID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" containerName="registry-server" containerID="cri-o://cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d" gracePeriod=2 Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.061229 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fcd4b" Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.118132 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-catalog-content\") pod \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.118744 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-utilities\") pod \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.118797 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrwzq\" (UniqueName: \"kubernetes.io/projected/16befb19-eef7-4a4d-8e1e-00c98b5c375c-kube-api-access-nrwzq\") pod \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.123665 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-utilities" (OuterVolumeSpecName: "utilities") pod "16befb19-eef7-4a4d-8e1e-00c98b5c375c" (UID: "16befb19-eef7-4a4d-8e1e-00c98b5c375c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.136860 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16befb19-eef7-4a4d-8e1e-00c98b5c375c-kube-api-access-nrwzq" (OuterVolumeSpecName: "kube-api-access-nrwzq") pod "16befb19-eef7-4a4d-8e1e-00c98b5c375c" (UID: "16befb19-eef7-4a4d-8e1e-00c98b5c375c"). InnerVolumeSpecName "kube-api-access-nrwzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.221070 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "16befb19-eef7-4a4d-8e1e-00c98b5c375c" (UID: "16befb19-eef7-4a4d-8e1e-00c98b5c375c"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.221249 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-catalog-content\") pod \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\" (UID: \"16befb19-eef7-4a4d-8e1e-00c98b5c375c\") " Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.221917 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.221946 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrwzq\" (UniqueName: \"kubernetes.io/projected/16befb19-eef7-4a4d-8e1e-00c98b5c375c-kube-api-access-nrwzq\") on node \"crc\" DevicePath \"\"" Nov 24 08:32:11 crc kubenswrapper[4691]: W1124 08:32:11.222020 4691 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/16befb19-eef7-4a4d-8e1e-00c98b5c375c/volumes/kubernetes.io~empty-dir/catalog-content Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.222032 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "16befb19-eef7-4a4d-8e1e-00c98b5c375c" (UID: "16befb19-eef7-4a4d-8e1e-00c98b5c375c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.323420 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16befb19-eef7-4a4d-8e1e-00c98b5c375c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.579224 4691 generic.go:334] "Generic (PLEG): container finished" podID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" containerID="cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d" exitCode=0 Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.579293 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcd4b" event={"ID":"16befb19-eef7-4a4d-8e1e-00c98b5c375c","Type":"ContainerDied","Data":"cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d"} Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.579343 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fcd4b" event={"ID":"16befb19-eef7-4a4d-8e1e-00c98b5c375c","Type":"ContainerDied","Data":"8b58d4da80a917524f40aa88ff1d231e8568dac9bf3f0d1f32e08eaf9be97f5a"} Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.579346 4691 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.579373 4691 scope.go:117] "RemoveContainer" containerID="cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d"
Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.603203 4691 scope.go:117] "RemoveContainer" containerID="e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1"
Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.639324 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fcd4b"]
Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.641671 4691 scope.go:117] "RemoveContainer" containerID="4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2"
Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.647077 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fcd4b"]
Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.704149 4691 scope.go:117] "RemoveContainer" containerID="cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d"
Nov 24 08:32:11 crc kubenswrapper[4691]: E1124 08:32:11.704735 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d\": container with ID starting with cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d not found: ID does not exist" containerID="cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d"
Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.704794 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d"} err="failed to get container status \"cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d\": rpc error: code = NotFound desc = could not find container \"cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d\": container with ID starting with cfd99b81eb5f1d95d29679ff3af39c332b4a8b4219a07cfc175722b1df7bda6d not found: ID does not exist"
Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.704827 4691 scope.go:117] "RemoveContainer" containerID="e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1"
Nov 24 08:32:11 crc kubenswrapper[4691]: E1124 08:32:11.705361 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1\": container with ID starting with e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1 not found: ID does not exist" containerID="e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1"
Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.705405 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1"} err="failed to get container status \"e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1\": rpc error: code = NotFound desc = could not find container \"e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1\": container with ID starting with e05b7d4eeb1443494197739ed584bb945e8455e3aa5b928258d76de3053d71b1 not found: ID does not exist"
Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.705471 4691 scope.go:117] "RemoveContainer" containerID="4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2"
containerID="4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2" Nov 24 08:32:11 crc kubenswrapper[4691]: E1124 08:32:11.705748 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2\": container with ID starting with 4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2 not found: ID does not exist" containerID="4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2" Nov 24 08:32:11 crc kubenswrapper[4691]: I1124 08:32:11.705785 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2"} err="failed to get container status \"4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2\": rpc error: code = NotFound desc = could not find container \"4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2\": container with ID starting with 4be3770b11d5d68af4c7b37161c8ae1670220c2772b0fe8603954a1b1a127ce2 not found: ID does not exist" Nov 24 08:32:12 crc kubenswrapper[4691]: I1124 08:32:12.780294 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" path="/var/lib/kubelet/pods/16befb19-eef7-4a4d-8e1e-00c98b5c375c/volumes" Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.719284 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-98sj2"] Nov 24 08:32:59 crc kubenswrapper[4691]: E1124 08:32:59.720279 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" containerName="extract-utilities" Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.720292 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" containerName="extract-utilities" Nov 24 08:32:59 crc kubenswrapper[4691]: E1124 08:32:59.720308 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" containerName="extract-utilities" Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.720314 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" containerName="extract-utilities" Nov 24 08:32:59 crc kubenswrapper[4691]: E1124 08:32:59.720327 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" containerName="registry-server" Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.720333 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" containerName="registry-server" Nov 24 08:32:59 crc kubenswrapper[4691]: E1124 08:32:59.720341 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" containerName="extract-content" Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.720347 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" containerName="extract-content" Nov 24 08:32:59 crc kubenswrapper[4691]: E1124 08:32:59.720355 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" containerName="registry-server" Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.720360 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" containerName="registry-server" Nov 
Nov 24 08:32:59 crc kubenswrapper[4691]: E1124 08:32:59.720382 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" containerName="extract-content"
Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.720390 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" containerName="extract-content"
Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.720595 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="16befb19-eef7-4a4d-8e1e-00c98b5c375c" containerName="registry-server"
Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.720631 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="6802515e-54e3-4a7f-a4ce-ada8a63c6fa3" containerName="registry-server"
Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.722309 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-98sj2"
Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.752210 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-98sj2"]
Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.824423 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tgbc\" (UniqueName: \"kubernetes.io/projected/c632a4f2-3e9f-4297-8ad1-296b50d34a81-kube-api-access-8tgbc\") pod \"redhat-marketplace-98sj2\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " pod="openshift-marketplace/redhat-marketplace-98sj2"
Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.824874 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-utilities\") pod \"redhat-marketplace-98sj2\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " pod="openshift-marketplace/redhat-marketplace-98sj2"
Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.824910 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-catalog-content\") pod \"redhat-marketplace-98sj2\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " pod="openshift-marketplace/redhat-marketplace-98sj2"
Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.926928 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-catalog-content\") pod \"redhat-marketplace-98sj2\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " pod="openshift-marketplace/redhat-marketplace-98sj2"
Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.927124 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tgbc\" (UniqueName: \"kubernetes.io/projected/c632a4f2-3e9f-4297-8ad1-296b50d34a81-kube-api-access-8tgbc\") pod \"redhat-marketplace-98sj2\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " pod="openshift-marketplace/redhat-marketplace-98sj2"
Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.927152 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-utilities\") pod \"redhat-marketplace-98sj2\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " pod="openshift-marketplace/redhat-marketplace-98sj2"
pod="openshift-marketplace/redhat-marketplace-98sj2" Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.928013 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-utilities\") pod \"redhat-marketplace-98sj2\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " pod="openshift-marketplace/redhat-marketplace-98sj2" Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.928026 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-catalog-content\") pod \"redhat-marketplace-98sj2\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " pod="openshift-marketplace/redhat-marketplace-98sj2" Nov 24 08:32:59 crc kubenswrapper[4691]: I1124 08:32:59.957610 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tgbc\" (UniqueName: \"kubernetes.io/projected/c632a4f2-3e9f-4297-8ad1-296b50d34a81-kube-api-access-8tgbc\") pod \"redhat-marketplace-98sj2\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " pod="openshift-marketplace/redhat-marketplace-98sj2" Nov 24 08:33:00 crc kubenswrapper[4691]: I1124 08:33:00.044049 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-98sj2" Nov 24 08:33:00 crc kubenswrapper[4691]: I1124 08:33:00.542103 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-98sj2"] Nov 24 08:33:01 crc kubenswrapper[4691]: I1124 08:33:01.091357 4691 generic.go:334] "Generic (PLEG): container finished" podID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" containerID="35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44" exitCode=0 Nov 24 08:33:01 crc kubenswrapper[4691]: I1124 08:33:01.091637 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-98sj2" event={"ID":"c632a4f2-3e9f-4297-8ad1-296b50d34a81","Type":"ContainerDied","Data":"35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44"} Nov 24 08:33:01 crc kubenswrapper[4691]: I1124 08:33:01.091665 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-98sj2" event={"ID":"c632a4f2-3e9f-4297-8ad1-296b50d34a81","Type":"ContainerStarted","Data":"dcde523c5d27a153c7033c84c66d52cdf39e84866c23d9c486a5970d2b1c9cbd"} Nov 24 08:33:02 crc kubenswrapper[4691]: I1124 08:33:02.104637 4691 generic.go:334] "Generic (PLEG): container finished" podID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" containerID="c23da99fa8a2cd32c8b9b2a59b59649099b39c5f9785651aceb465f81d6cf0a4" exitCode=0 Nov 24 08:33:02 crc kubenswrapper[4691]: I1124 08:33:02.105031 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-98sj2" event={"ID":"c632a4f2-3e9f-4297-8ad1-296b50d34a81","Type":"ContainerDied","Data":"c23da99fa8a2cd32c8b9b2a59b59649099b39c5f9785651aceb465f81d6cf0a4"} Nov 24 08:33:03 crc kubenswrapper[4691]: I1124 08:33:03.125111 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-98sj2" event={"ID":"c632a4f2-3e9f-4297-8ad1-296b50d34a81","Type":"ContainerStarted","Data":"bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec"} Nov 24 08:33:03 crc kubenswrapper[4691]: I1124 08:33:03.151389 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-marketplace-98sj2" podStartSLOduration=2.717667076 podStartE2EDuration="4.151357742s" podCreationTimestamp="2025-11-24 08:32:59 +0000 UTC" firstStartedPulling="2025-11-24 08:33:01.093059184 +0000 UTC m=+2143.092008423" lastFinishedPulling="2025-11-24 08:33:02.52674984 +0000 UTC m=+2144.525699089" observedRunningTime="2025-11-24 08:33:03.145185376 +0000 UTC m=+2145.144134665" watchObservedRunningTime="2025-11-24 08:33:03.151357742 +0000 UTC m=+2145.150306991" Nov 24 08:33:10 crc kubenswrapper[4691]: I1124 08:33:10.044619 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-98sj2" Nov 24 08:33:10 crc kubenswrapper[4691]: I1124 08:33:10.045604 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-98sj2" Nov 24 08:33:10 crc kubenswrapper[4691]: I1124 08:33:10.092418 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-98sj2" Nov 24 08:33:10 crc kubenswrapper[4691]: I1124 08:33:10.243789 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-98sj2" Nov 24 08:33:10 crc kubenswrapper[4691]: I1124 08:33:10.328247 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-98sj2"] Nov 24 08:33:12 crc kubenswrapper[4691]: I1124 08:33:12.208963 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-98sj2" podUID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" containerName="registry-server" containerID="cri-o://bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec" gracePeriod=2 Nov 24 08:33:12 crc kubenswrapper[4691]: I1124 08:33:12.712638 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-98sj2" Nov 24 08:33:12 crc kubenswrapper[4691]: I1124 08:33:12.881876 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tgbc\" (UniqueName: \"kubernetes.io/projected/c632a4f2-3e9f-4297-8ad1-296b50d34a81-kube-api-access-8tgbc\") pod \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " Nov 24 08:33:12 crc kubenswrapper[4691]: I1124 08:33:12.882006 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-catalog-content\") pod \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " Nov 24 08:33:12 crc kubenswrapper[4691]: I1124 08:33:12.882340 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-utilities\") pod \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\" (UID: \"c632a4f2-3e9f-4297-8ad1-296b50d34a81\") " Nov 24 08:33:12 crc kubenswrapper[4691]: I1124 08:33:12.883789 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-utilities" (OuterVolumeSpecName: "utilities") pod "c632a4f2-3e9f-4297-8ad1-296b50d34a81" (UID: "c632a4f2-3e9f-4297-8ad1-296b50d34a81"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:33:12 crc kubenswrapper[4691]: I1124 08:33:12.890829 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c632a4f2-3e9f-4297-8ad1-296b50d34a81-kube-api-access-8tgbc" (OuterVolumeSpecName: "kube-api-access-8tgbc") pod "c632a4f2-3e9f-4297-8ad1-296b50d34a81" (UID: "c632a4f2-3e9f-4297-8ad1-296b50d34a81"). InnerVolumeSpecName "kube-api-access-8tgbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:33:12 crc kubenswrapper[4691]: I1124 08:33:12.921714 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c632a4f2-3e9f-4297-8ad1-296b50d34a81" (UID: "c632a4f2-3e9f-4297-8ad1-296b50d34a81"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:33:12 crc kubenswrapper[4691]: I1124 08:33:12.985602 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tgbc\" (UniqueName: \"kubernetes.io/projected/c632a4f2-3e9f-4297-8ad1-296b50d34a81-kube-api-access-8tgbc\") on node \"crc\" DevicePath \"\"" Nov 24 08:33:12 crc kubenswrapper[4691]: I1124 08:33:12.985893 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:33:12 crc kubenswrapper[4691]: I1124 08:33:12.986018 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c632a4f2-3e9f-4297-8ad1-296b50d34a81-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.220602 4691 generic.go:334] "Generic (PLEG): container finished" podID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" containerID="bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec" exitCode=0 Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.220690 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-98sj2" event={"ID":"c632a4f2-3e9f-4297-8ad1-296b50d34a81","Type":"ContainerDied","Data":"bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec"} Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.221675 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-98sj2" event={"ID":"c632a4f2-3e9f-4297-8ad1-296b50d34a81","Type":"ContainerDied","Data":"dcde523c5d27a153c7033c84c66d52cdf39e84866c23d9c486a5970d2b1c9cbd"} Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.221697 4691 scope.go:117] "RemoveContainer" containerID="bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec" Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.220744 4691 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.253577 4691 scope.go:117] "RemoveContainer" containerID="c23da99fa8a2cd32c8b9b2a59b59649099b39c5f9785651aceb465f81d6cf0a4"
Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.270344 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-98sj2"]
Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.278288 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-98sj2"]
Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.280194 4691 scope.go:117] "RemoveContainer" containerID="35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44"
Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.342396 4691 scope.go:117] "RemoveContainer" containerID="bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec"
Nov 24 08:33:13 crc kubenswrapper[4691]: E1124 08:33:13.345055 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec\": container with ID starting with bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec not found: ID does not exist" containerID="bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec"
Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.345111 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec"} err="failed to get container status \"bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec\": rpc error: code = NotFound desc = could not find container \"bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec\": container with ID starting with bf4543046d572114b3871b1e91acd4ee451f4ef7dbcf42a3a6d3400fa3babdec not found: ID does not exist"
Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.345151 4691 scope.go:117] "RemoveContainer" containerID="c23da99fa8a2cd32c8b9b2a59b59649099b39c5f9785651aceb465f81d6cf0a4"
Nov 24 08:33:13 crc kubenswrapper[4691]: E1124 08:33:13.345627 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c23da99fa8a2cd32c8b9b2a59b59649099b39c5f9785651aceb465f81d6cf0a4\": container with ID starting with c23da99fa8a2cd32c8b9b2a59b59649099b39c5f9785651aceb465f81d6cf0a4 not found: ID does not exist" containerID="c23da99fa8a2cd32c8b9b2a59b59649099b39c5f9785651aceb465f81d6cf0a4"
Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.345674 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c23da99fa8a2cd32c8b9b2a59b59649099b39c5f9785651aceb465f81d6cf0a4"} err="failed to get container status \"c23da99fa8a2cd32c8b9b2a59b59649099b39c5f9785651aceb465f81d6cf0a4\": rpc error: code = NotFound desc = could not find container \"c23da99fa8a2cd32c8b9b2a59b59649099b39c5f9785651aceb465f81d6cf0a4\": container with ID starting with c23da99fa8a2cd32c8b9b2a59b59649099b39c5f9785651aceb465f81d6cf0a4 not found: ID does not exist"
Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.345695 4691 scope.go:117] "RemoveContainer" containerID="35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44"
Nov 24 08:33:13 crc kubenswrapper[4691]: E1124 08:33:13.346015 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44\": container with ID starting with 35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44 not found: ID does not exist" containerID="35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44"
failed" err="rpc error: code = NotFound desc = could not find container \"35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44\": container with ID starting with 35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44 not found: ID does not exist" containerID="35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44" Nov 24 08:33:13 crc kubenswrapper[4691]: I1124 08:33:13.346072 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44"} err="failed to get container status \"35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44\": rpc error: code = NotFound desc = could not find container \"35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44\": container with ID starting with 35a5821aec80d3b0248125333c3806eb83ea2b37389ddcd194f19c5b5d9b5b44 not found: ID does not exist" Nov 24 08:33:14 crc kubenswrapper[4691]: I1124 08:33:14.782936 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" path="/var/lib/kubelet/pods/c632a4f2-3e9f-4297-8ad1-296b50d34a81/volumes" Nov 24 08:33:21 crc kubenswrapper[4691]: I1124 08:33:21.089878 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:33:21 crc kubenswrapper[4691]: I1124 08:33:21.090892 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:33:51 crc kubenswrapper[4691]: I1124 08:33:51.091348 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:33:51 crc kubenswrapper[4691]: I1124 08:33:51.092503 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:34:04 crc kubenswrapper[4691]: I1124 08:34:04.754239 4691 generic.go:334] "Generic (PLEG): container finished" podID="e9953558-8b56-432e-bde8-c07beaa047c0" containerID="44312e0a6a2a6ce1137afb59cebbc5ad60605e1b469f5dbb639ea6fc373a769d" exitCode=0 Nov 24 08:34:04 crc kubenswrapper[4691]: I1124 08:34:04.754326 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" event={"ID":"e9953558-8b56-432e-bde8-c07beaa047c0","Type":"ContainerDied","Data":"44312e0a6a2a6ce1137afb59cebbc5ad60605e1b469f5dbb639ea6fc373a769d"} Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.297979 4691 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.337305 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-combined-ca-bundle\") pod \"e9953558-8b56-432e-bde8-c07beaa047c0\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") "
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.337372 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7d2x\" (UniqueName: \"kubernetes.io/projected/e9953558-8b56-432e-bde8-c07beaa047c0-kube-api-access-d7d2x\") pod \"e9953558-8b56-432e-bde8-c07beaa047c0\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") "
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.337509 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-ssh-key\") pod \"e9953558-8b56-432e-bde8-c07beaa047c0\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") "
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.337558 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-inventory\") pod \"e9953558-8b56-432e-bde8-c07beaa047c0\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") "
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.337615 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-secret-0\") pod \"e9953558-8b56-432e-bde8-c07beaa047c0\" (UID: \"e9953558-8b56-432e-bde8-c07beaa047c0\") "
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.345649 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9953558-8b56-432e-bde8-c07beaa047c0-kube-api-access-d7d2x" (OuterVolumeSpecName: "kube-api-access-d7d2x") pod "e9953558-8b56-432e-bde8-c07beaa047c0" (UID: "e9953558-8b56-432e-bde8-c07beaa047c0"). InnerVolumeSpecName "kube-api-access-d7d2x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.349522 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "e9953558-8b56-432e-bde8-c07beaa047c0" (UID: "e9953558-8b56-432e-bde8-c07beaa047c0"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.368288 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "e9953558-8b56-432e-bde8-c07beaa047c0" (UID: "e9953558-8b56-432e-bde8-c07beaa047c0"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.374466 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e9953558-8b56-432e-bde8-c07beaa047c0" (UID: "e9953558-8b56-432e-bde8-c07beaa047c0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.383761 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-inventory" (OuterVolumeSpecName: "inventory") pod "e9953558-8b56-432e-bde8-c07beaa047c0" (UID: "e9953558-8b56-432e-bde8-c07beaa047c0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.440716 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.440934 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.441015 4691 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.441073 4691 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9953558-8b56-432e-bde8-c07beaa047c0-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.441134 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7d2x\" (UniqueName: \"kubernetes.io/projected/e9953558-8b56-432e-bde8-c07beaa047c0-kube-api-access-d7d2x\") on node \"crc\" DevicePath \"\"" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.778228 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm" event={"ID":"e9953558-8b56-432e-bde8-c07beaa047c0","Type":"ContainerDied","Data":"3ca259ce79a55dc99d7b178b3de11373381e37161bb8a22ffc45140fb7d95280"} Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.778285 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ca259ce79a55dc99d7b178b3de11373381e37161bb8a22ffc45140fb7d95280" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.778287 4691 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.893296 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb"]
Nov 24 08:34:06 crc kubenswrapper[4691]: E1124 08:34:06.893767 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" containerName="extract-utilities"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.893785 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" containerName="extract-utilities"
Nov 24 08:34:06 crc kubenswrapper[4691]: E1124 08:34:06.893808 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" containerName="extract-content"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.893814 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" containerName="extract-content"
Nov 24 08:34:06 crc kubenswrapper[4691]: E1124 08:34:06.893837 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9953558-8b56-432e-bde8-c07beaa047c0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.893844 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9953558-8b56-432e-bde8-c07beaa047c0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 24 08:34:06 crc kubenswrapper[4691]: E1124 08:34:06.893857 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" containerName="registry-server"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.893862 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" containerName="registry-server"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.894217 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="c632a4f2-3e9f-4297-8ad1-296b50d34a81" containerName="registry-server"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.894250 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9953558-8b56-432e-bde8-c07beaa047c0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.894917 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.897768 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.898075 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.898218 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.898396 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.898412 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.899831 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb"]
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.905162 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.905367 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.952646 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.952691 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.952727 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.952812 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb"
Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.952840 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86ht8\" (UniqueName: \"kubernetes.io/projected/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-kube-api-access-86ht8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb"
\"kubernetes.io/projected/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-kube-api-access-86ht8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.952873 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.952902 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.952921 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:06 crc kubenswrapper[4691]: I1124 08:34:06.952986 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.054030 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.054078 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.054105 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.054153 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: 
\"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.054177 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86ht8\" (UniqueName: \"kubernetes.io/projected/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-kube-api-access-86ht8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.054208 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.054236 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.054254 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.054776 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.055326 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.059672 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.059726 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: 
\"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.060427 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.061210 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.061316 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.061971 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.062759 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.073896 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86ht8\" (UniqueName: \"kubernetes.io/projected/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-kube-api-access-86ht8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-zsxsb\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.214546 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.777653 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb"] Nov 24 08:34:07 crc kubenswrapper[4691]: I1124 08:34:07.787965 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 08:34:08 crc kubenswrapper[4691]: I1124 08:34:08.801608 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" event={"ID":"bf4bcfba-eec4-43be-b119-cf8f0bdd7182","Type":"ContainerStarted","Data":"a80c66c5c317cc7a96c6497e47260bbfd94131532c8a8c4920ca0889a0d3a86f"} Nov 24 08:34:08 crc kubenswrapper[4691]: I1124 08:34:08.801666 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" event={"ID":"bf4bcfba-eec4-43be-b119-cf8f0bdd7182","Type":"ContainerStarted","Data":"9bbd47485105e30c6a483dee11e2a598b4722d8f445f02331fc9efb8b9d798a7"} Nov 24 08:34:08 crc kubenswrapper[4691]: I1124 08:34:08.824416 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" podStartSLOduration=2.370792802 podStartE2EDuration="2.824314924s" podCreationTimestamp="2025-11-24 08:34:06 +0000 UTC" firstStartedPulling="2025-11-24 08:34:07.787656491 +0000 UTC m=+2209.786605750" lastFinishedPulling="2025-11-24 08:34:08.241178623 +0000 UTC m=+2210.240127872" observedRunningTime="2025-11-24 08:34:08.818940261 +0000 UTC m=+2210.817889530" watchObservedRunningTime="2025-11-24 08:34:08.824314924 +0000 UTC m=+2210.823264193" Nov 24 08:34:21 crc kubenswrapper[4691]: I1124 08:34:21.089801 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:34:21 crc kubenswrapper[4691]: I1124 08:34:21.090391 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:34:21 crc kubenswrapper[4691]: I1124 08:34:21.090437 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:34:21 crc kubenswrapper[4691]: I1124 08:34:21.091243 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 08:34:21 crc kubenswrapper[4691]: I1124 08:34:21.091286 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" gracePeriod=600 Nov 24 08:34:21 crc 
kubenswrapper[4691]: E1124 08:34:21.231277 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:34:21 crc kubenswrapper[4691]: I1124 08:34:21.935870 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" exitCode=0 Nov 24 08:34:21 crc kubenswrapper[4691]: I1124 08:34:21.935921 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20"} Nov 24 08:34:21 crc kubenswrapper[4691]: I1124 08:34:21.935957 4691 scope.go:117] "RemoveContainer" containerID="70419c902bba2a0ee14c9bd0fd9567bf6662a2111ab103dd012ed4d7572a55ae" Nov 24 08:34:21 crc kubenswrapper[4691]: I1124 08:34:21.936400 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:34:21 crc kubenswrapper[4691]: E1124 08:34:21.936660 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:34:34 crc kubenswrapper[4691]: I1124 08:34:34.760926 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:34:34 crc kubenswrapper[4691]: E1124 08:34:34.761955 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:34:47 crc kubenswrapper[4691]: I1124 08:34:47.761094 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:34:47 crc kubenswrapper[4691]: E1124 08:34:47.762364 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:34:59 crc kubenswrapper[4691]: I1124 08:34:59.761513 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:34:59 crc kubenswrapper[4691]: E1124 08:34:59.762425 4691 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:35:14 crc kubenswrapper[4691]: I1124 08:35:14.761639 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:35:14 crc kubenswrapper[4691]: E1124 08:35:14.762829 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:35:27 crc kubenswrapper[4691]: I1124 08:35:27.761099 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:35:27 crc kubenswrapper[4691]: E1124 08:35:27.762366 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:35:40 crc kubenswrapper[4691]: I1124 08:35:40.760866 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:35:40 crc kubenswrapper[4691]: E1124 08:35:40.762345 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:35:51 crc kubenswrapper[4691]: I1124 08:35:51.761722 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:35:51 crc kubenswrapper[4691]: E1124 08:35:51.762850 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:36:05 crc kubenswrapper[4691]: I1124 08:36:05.760850 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:36:05 crc kubenswrapper[4691]: E1124 08:36:05.761955 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:36:17 crc kubenswrapper[4691]: I1124 08:36:17.760767 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20"
Nov 24 08:36:17 crc kubenswrapper[4691]: E1124 08:36:17.761716 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:36:31 crc kubenswrapper[4691]: I1124 08:36:31.760649 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20"
Nov 24 08:36:31 crc kubenswrapper[4691]: E1124 08:36:31.761528 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:36:46 crc kubenswrapper[4691]: I1124 08:36:46.760902 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20"
Nov 24 08:36:46 crc kubenswrapper[4691]: E1124 08:36:46.762587 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:36:58 crc kubenswrapper[4691]: I1124 08:36:58.769121 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20"
Nov 24 08:36:58 crc kubenswrapper[4691]: E1124 08:36:58.769955 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:36:59 crc kubenswrapper[4691]: I1124 08:36:59.505201 4691 generic.go:334] "Generic (PLEG): container finished" podID="bf4bcfba-eec4-43be-b119-cf8f0bdd7182" containerID="a80c66c5c317cc7a96c6497e47260bbfd94131532c8a8c4920ca0889a0d3a86f" exitCode=0
Nov 24 08:36:59 crc kubenswrapper[4691]: I1124 08:36:59.505273 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" event={"ID":"bf4bcfba-eec4-43be-b119-cf8f0bdd7182","Type":"ContainerDied","Data":"a80c66c5c317cc7a96c6497e47260bbfd94131532c8a8c4920ca0889a0d3a86f"}
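The paired "RemoveContainer" / "Error syncing pod, skipping" entries above repeat every ten-odd seconds: each kubelet sync attempt is turned away while the crash-loop back-off is in force, and the quoted message shows the back-off already at its 5m0s cap for this container. A sketch of the doubling schedule that produces such a cap, assuming the usual kubelet crash-loop parameters (10s initial delay, factor 2, 5m ceiling) rather than anything read from this node's configuration:

```go
// Print an assumed kubelet crash-loop back-off schedule: 10s initial
// delay, doubled per restart, capped at 5m. Illustrative only.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay, ceiling := 10*time.Second, 5*time.Minute
	for n := 1; delay < ceiling; n++ {
		fmt.Printf("restart %d: wait %v\n", n, delay)
		delay *= 2
	}
	fmt.Printf("subsequent restarts: wait %v (cap)\n", ceiling)
}
```

Consistent with the cap, the container killed at 08:34:21 is only recreated after the window lapses; see the 08:39:25 "RemoveContainer" and 08:39:28 ContainerStarted entries further down.

Nov 24 08:37:01 crc kubenswrapper[4691]: I1124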
08:37:01.012326 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.154637 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-inventory\") pod \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.155027 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-ssh-key\") pod \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.155124 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-0\") pod \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.155279 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86ht8\" (UniqueName: \"kubernetes.io/projected/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-kube-api-access-86ht8\") pod \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.155364 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-1\") pod \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.155479 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-extra-config-0\") pod \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.155559 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-1\") pod \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.155677 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-0\") pod \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.155807 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-combined-ca-bundle\") pod \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\" (UID: \"bf4bcfba-eec4-43be-b119-cf8f0bdd7182\") " Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.171048 4691 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/projected/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-kube-api-access-86ht8" (OuterVolumeSpecName: "kube-api-access-86ht8") pod "bf4bcfba-eec4-43be-b119-cf8f0bdd7182" (UID: "bf4bcfba-eec4-43be-b119-cf8f0bdd7182"). InnerVolumeSpecName "kube-api-access-86ht8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.172012 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "bf4bcfba-eec4-43be-b119-cf8f0bdd7182" (UID: "bf4bcfba-eec4-43be-b119-cf8f0bdd7182"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.209567 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-inventory" (OuterVolumeSpecName: "inventory") pod "bf4bcfba-eec4-43be-b119-cf8f0bdd7182" (UID: "bf4bcfba-eec4-43be-b119-cf8f0bdd7182"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.233616 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "bf4bcfba-eec4-43be-b119-cf8f0bdd7182" (UID: "bf4bcfba-eec4-43be-b119-cf8f0bdd7182"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.250644 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bf4bcfba-eec4-43be-b119-cf8f0bdd7182" (UID: "bf4bcfba-eec4-43be-b119-cf8f0bdd7182"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.264802 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "bf4bcfba-eec4-43be-b119-cf8f0bdd7182" (UID: "bf4bcfba-eec4-43be-b119-cf8f0bdd7182"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.267176 4691 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.267213 4691 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.267232 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.267261 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.267284 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86ht8\" (UniqueName: \"kubernetes.io/projected/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-kube-api-access-86ht8\") on node \"crc\" DevicePath \"\"" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.267301 4691 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.268577 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "bf4bcfba-eec4-43be-b119-cf8f0bdd7182" (UID: "bf4bcfba-eec4-43be-b119-cf8f0bdd7182"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.293906 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "bf4bcfba-eec4-43be-b119-cf8f0bdd7182" (UID: "bf4bcfba-eec4-43be-b119-cf8f0bdd7182"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.295076 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "bf4bcfba-eec4-43be-b119-cf8f0bdd7182" (UID: "bf4bcfba-eec4-43be-b119-cf8f0bdd7182"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.371920 4691 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.371958 4691 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.371971 4691 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/bf4bcfba-eec4-43be-b119-cf8f0bdd7182-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.524757 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" event={"ID":"bf4bcfba-eec4-43be-b119-cf8f0bdd7182","Type":"ContainerDied","Data":"9bbd47485105e30c6a483dee11e2a598b4722d8f445f02331fc9efb8b9d798a7"} Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.525477 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bbd47485105e30c6a483dee11e2a598b4722d8f445f02331fc9efb8b9d798a7" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.525811 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-zsxsb" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.637958 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp"] Nov 24 08:37:01 crc kubenswrapper[4691]: E1124 08:37:01.638727 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf4bcfba-eec4-43be-b119-cf8f0bdd7182" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.640437 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf4bcfba-eec4-43be-b119-cf8f0bdd7182" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.641042 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf4bcfba-eec4-43be-b119-cf8f0bdd7182" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.641968 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.644965 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.645089 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-l69jd" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.645147 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.645251 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.645316 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.650529 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp"] Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.783321 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.783681 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.783770 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zblwx\" (UniqueName: \"kubernetes.io/projected/b19b3af1-e299-46ab-b579-902390cb75a3-kube-api-access-zblwx\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.783854 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.783981 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 
crc kubenswrapper[4691]: I1124 08:37:01.784036 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.784086 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.886118 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.886195 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zblwx\" (UniqueName: \"kubernetes.io/projected/b19b3af1-e299-46ab-b579-902390cb75a3-kube-api-access-zblwx\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.886245 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.886290 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.886310 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.886338 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.886394 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.890503 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.890676 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.890703 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.891028 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.893187 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.893341 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.904132 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zblwx\" (UniqueName: \"kubernetes.io/projected/b19b3af1-e299-46ab-b579-902390cb75a3-kube-api-access-zblwx\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp\" (UID: 
\"b19b3af1-e299-46ab-b579-902390cb75a3\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:01 crc kubenswrapper[4691]: I1124 08:37:01.966727 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:37:02 crc kubenswrapper[4691]: I1124 08:37:02.531193 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp"] Nov 24 08:37:03 crc kubenswrapper[4691]: I1124 08:37:03.549190 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" event={"ID":"b19b3af1-e299-46ab-b579-902390cb75a3","Type":"ContainerStarted","Data":"89838f44765ff3417f5c2cec00d3290295bb8c6fd3d3514ff61eea80a9fc406e"} Nov 24 08:37:03 crc kubenswrapper[4691]: I1124 08:37:03.549975 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" event={"ID":"b19b3af1-e299-46ab-b579-902390cb75a3","Type":"ContainerStarted","Data":"356b8981fee10ab7726fcfb88daea4f20dc0e656ade9a04a909195d71f133edc"} Nov 24 08:37:03 crc kubenswrapper[4691]: I1124 08:37:03.587601 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" podStartSLOduration=2.141303405 podStartE2EDuration="2.587536449s" podCreationTimestamp="2025-11-24 08:37:01 +0000 UTC" firstStartedPulling="2025-11-24 08:37:02.54751601 +0000 UTC m=+2384.546465259" lastFinishedPulling="2025-11-24 08:37:02.993749014 +0000 UTC m=+2384.992698303" observedRunningTime="2025-11-24 08:37:03.577365769 +0000 UTC m=+2385.576315078" watchObservedRunningTime="2025-11-24 08:37:03.587536449 +0000 UTC m=+2385.586485708" Nov 24 08:37:09 crc kubenswrapper[4691]: I1124 08:37:09.760790 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:37:09 crc kubenswrapper[4691]: E1124 08:37:09.761886 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:37:23 crc kubenswrapper[4691]: I1124 08:37:23.761865 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:37:23 crc kubenswrapper[4691]: E1124 08:37:23.763027 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:37:34 crc kubenswrapper[4691]: I1124 08:37:34.761187 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:37:34 crc kubenswrapper[4691]: E1124 08:37:34.762628 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:37:48 crc kubenswrapper[4691]: I1124 08:37:48.768507 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:37:48 crc kubenswrapper[4691]: E1124 08:37:48.769434 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:38:00 crc kubenswrapper[4691]: I1124 08:38:00.763261 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:38:00 crc kubenswrapper[4691]: E1124 08:38:00.765423 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:38:11 crc kubenswrapper[4691]: I1124 08:38:11.761870 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:38:11 crc kubenswrapper[4691]: E1124 08:38:11.762983 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:38:22 crc kubenswrapper[4691]: I1124 08:38:22.761159 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:38:22 crc kubenswrapper[4691]: E1124 08:38:22.762214 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:38:34 crc kubenswrapper[4691]: I1124 08:38:34.768933 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:38:34 crc kubenswrapper[4691]: E1124 08:38:34.770005 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:38:49 crc kubenswrapper[4691]: I1124 08:38:49.761119 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:38:49 crc kubenswrapper[4691]: E1124 08:38:49.762187 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:39:02 crc kubenswrapper[4691]: I1124 08:39:02.760655 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:39:02 crc kubenswrapper[4691]: E1124 08:39:02.761518 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:39:14 crc kubenswrapper[4691]: I1124 08:39:14.761611 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:39:14 crc kubenswrapper[4691]: E1124 08:39:14.762978 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.394409 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zdmwv"] Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.397566 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.413177 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zdmwv"] Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.511268 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-catalog-content\") pod \"redhat-operators-zdmwv\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.511408 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-utilities\") pod \"redhat-operators-zdmwv\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.511659 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5h2jf\" (UniqueName: \"kubernetes.io/projected/0b7dbc62-758b-4030-9149-7e77eceeea83-kube-api-access-5h2jf\") pod \"redhat-operators-zdmwv\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.614842 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-utilities\") pod \"redhat-operators-zdmwv\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.614970 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5h2jf\" (UniqueName: \"kubernetes.io/projected/0b7dbc62-758b-4030-9149-7e77eceeea83-kube-api-access-5h2jf\") pod \"redhat-operators-zdmwv\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.615075 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-catalog-content\") pod \"redhat-operators-zdmwv\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.615528 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-utilities\") pod \"redhat-operators-zdmwv\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.615625 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-catalog-content\") pod \"redhat-operators-zdmwv\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.648593 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-5h2jf\" (UniqueName: \"kubernetes.io/projected/0b7dbc62-758b-4030-9149-7e77eceeea83-kube-api-access-5h2jf\") pod \"redhat-operators-zdmwv\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:17 crc kubenswrapper[4691]: I1124 08:39:17.755561 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:18 crc kubenswrapper[4691]: I1124 08:39:18.282481 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zdmwv"] Nov 24 08:39:18 crc kubenswrapper[4691]: I1124 08:39:18.953555 4691 generic.go:334] "Generic (PLEG): container finished" podID="0b7dbc62-758b-4030-9149-7e77eceeea83" containerID="8af5e22b55e78e02a0f5ed43095391eb67947e4a18a80a7ee880b10863f47739" exitCode=0 Nov 24 08:39:18 crc kubenswrapper[4691]: I1124 08:39:18.953605 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zdmwv" event={"ID":"0b7dbc62-758b-4030-9149-7e77eceeea83","Type":"ContainerDied","Data":"8af5e22b55e78e02a0f5ed43095391eb67947e4a18a80a7ee880b10863f47739"} Nov 24 08:39:18 crc kubenswrapper[4691]: I1124 08:39:18.953953 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zdmwv" event={"ID":"0b7dbc62-758b-4030-9149-7e77eceeea83","Type":"ContainerStarted","Data":"a24a4ccf0626531de2198f5aa90091145e346e261f4ea9ce3bfc7dea4ab2a37a"} Nov 24 08:39:18 crc kubenswrapper[4691]: I1124 08:39:18.957670 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 08:39:19 crc kubenswrapper[4691]: I1124 08:39:19.968609 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zdmwv" event={"ID":"0b7dbc62-758b-4030-9149-7e77eceeea83","Type":"ContainerStarted","Data":"351fa0fc0f6c387c19e2bc5cfe7fb561d0c5ec1f94375cb517d87e8042291bdd"} Nov 24 08:39:20 crc kubenswrapper[4691]: I1124 08:39:20.980229 4691 generic.go:334] "Generic (PLEG): container finished" podID="0b7dbc62-758b-4030-9149-7e77eceeea83" containerID="351fa0fc0f6c387c19e2bc5cfe7fb561d0c5ec1f94375cb517d87e8042291bdd" exitCode=0 Nov 24 08:39:20 crc kubenswrapper[4691]: I1124 08:39:20.980367 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zdmwv" event={"ID":"0b7dbc62-758b-4030-9149-7e77eceeea83","Type":"ContainerDied","Data":"351fa0fc0f6c387c19e2bc5cfe7fb561d0c5ec1f94375cb517d87e8042291bdd"} Nov 24 08:39:21 crc kubenswrapper[4691]: I1124 08:39:21.996172 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zdmwv" event={"ID":"0b7dbc62-758b-4030-9149-7e77eceeea83","Type":"ContainerStarted","Data":"f65a49f19ae38c9363c27a48e8c574f3aca9c6056fcbda53657da57a0b659d94"} Nov 24 08:39:22 crc kubenswrapper[4691]: I1124 08:39:22.015591 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zdmwv" podStartSLOduration=2.354188947 podStartE2EDuration="5.01557318s" podCreationTimestamp="2025-11-24 08:39:17 +0000 UTC" firstStartedPulling="2025-11-24 08:39:18.957327137 +0000 UTC m=+2520.956276386" lastFinishedPulling="2025-11-24 08:39:21.61871137 +0000 UTC m=+2523.617660619" observedRunningTime="2025-11-24 08:39:22.013770219 +0000 UTC m=+2524.012719468" watchObservedRunningTime="2025-11-24 08:39:22.01557318 +0000 UTC m=+2524.014522429" Nov 24 08:39:25 crc 
kubenswrapper[4691]: I1124 08:39:25.760148 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:39:27 crc kubenswrapper[4691]: I1124 08:39:27.756373 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:27 crc kubenswrapper[4691]: I1124 08:39:27.757349 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:27 crc kubenswrapper[4691]: I1124 08:39:27.823265 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:28 crc kubenswrapper[4691]: I1124 08:39:28.046372 4691 generic.go:334] "Generic (PLEG): container finished" podID="b19b3af1-e299-46ab-b579-902390cb75a3" containerID="89838f44765ff3417f5c2cec00d3290295bb8c6fd3d3514ff61eea80a9fc406e" exitCode=0 Nov 24 08:39:28 crc kubenswrapper[4691]: I1124 08:39:28.046727 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" event={"ID":"b19b3af1-e299-46ab-b579-902390cb75a3","Type":"ContainerDied","Data":"89838f44765ff3417f5c2cec00d3290295bb8c6fd3d3514ff61eea80a9fc406e"} Nov 24 08:39:28 crc kubenswrapper[4691]: I1124 08:39:28.050824 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"f6d92258bfa625e20cc52564ae622942d4685b3155e92eab89a59d15f2bcf57b"} Nov 24 08:39:28 crc kubenswrapper[4691]: I1124 08:39:28.108750 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:28 crc kubenswrapper[4691]: I1124 08:39:28.162040 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zdmwv"] Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.514042 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.554613 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ssh-key\") pod \"b19b3af1-e299-46ab-b579-902390cb75a3\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.554719 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-inventory\") pod \"b19b3af1-e299-46ab-b579-902390cb75a3\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.555651 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-0\") pod \"b19b3af1-e299-46ab-b579-902390cb75a3\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.555826 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zblwx\" (UniqueName: \"kubernetes.io/projected/b19b3af1-e299-46ab-b579-902390cb75a3-kube-api-access-zblwx\") pod \"b19b3af1-e299-46ab-b579-902390cb75a3\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.556752 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-1\") pod \"b19b3af1-e299-46ab-b579-902390cb75a3\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.556850 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-2\") pod \"b19b3af1-e299-46ab-b579-902390cb75a3\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.557009 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-telemetry-combined-ca-bundle\") pod \"b19b3af1-e299-46ab-b579-902390cb75a3\" (UID: \"b19b3af1-e299-46ab-b579-902390cb75a3\") " Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.563043 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b19b3af1-e299-46ab-b579-902390cb75a3-kube-api-access-zblwx" (OuterVolumeSpecName: "kube-api-access-zblwx") pod "b19b3af1-e299-46ab-b579-902390cb75a3" (UID: "b19b3af1-e299-46ab-b579-902390cb75a3"). InnerVolumeSpecName "kube-api-access-zblwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.570551 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "b19b3af1-e299-46ab-b579-902390cb75a3" (UID: "b19b3af1-e299-46ab-b579-902390cb75a3"). 
InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.584474 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "b19b3af1-e299-46ab-b579-902390cb75a3" (UID: "b19b3af1-e299-46ab-b579-902390cb75a3"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.594337 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "b19b3af1-e299-46ab-b579-902390cb75a3" (UID: "b19b3af1-e299-46ab-b579-902390cb75a3"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.599815 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b19b3af1-e299-46ab-b579-902390cb75a3" (UID: "b19b3af1-e299-46ab-b579-902390cb75a3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.600913 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "b19b3af1-e299-46ab-b579-902390cb75a3" (UID: "b19b3af1-e299-46ab-b579-902390cb75a3"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.609799 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-inventory" (OuterVolumeSpecName: "inventory") pod "b19b3af1-e299-46ab-b579-902390cb75a3" (UID: "b19b3af1-e299-46ab-b579-902390cb75a3"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.659991 4691 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.660141 4691 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.660174 4691 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.660200 4691 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-inventory\") on node \"crc\" DevicePath \"\"" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.660229 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zblwx\" (UniqueName: \"kubernetes.io/projected/b19b3af1-e299-46ab-b579-902390cb75a3-kube-api-access-zblwx\") on node \"crc\" DevicePath \"\"" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.660252 4691 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 24 08:39:29 crc kubenswrapper[4691]: I1124 08:39:29.660270 4691 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/b19b3af1-e299-46ab-b579-902390cb75a3-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 24 08:39:30 crc kubenswrapper[4691]: I1124 08:39:30.068719 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" Nov 24 08:39:30 crc kubenswrapper[4691]: I1124 08:39:30.068697 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp" event={"ID":"b19b3af1-e299-46ab-b579-902390cb75a3","Type":"ContainerDied","Data":"356b8981fee10ab7726fcfb88daea4f20dc0e656ade9a04a909195d71f133edc"} Nov 24 08:39:30 crc kubenswrapper[4691]: I1124 08:39:30.068812 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="356b8981fee10ab7726fcfb88daea4f20dc0e656ade9a04a909195d71f133edc" Nov 24 08:39:30 crc kubenswrapper[4691]: I1124 08:39:30.068873 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zdmwv" podUID="0b7dbc62-758b-4030-9149-7e77eceeea83" containerName="registry-server" containerID="cri-o://f65a49f19ae38c9363c27a48e8c574f3aca9c6056fcbda53657da57a0b659d94" gracePeriod=2 Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.086127 4691 generic.go:334] "Generic (PLEG): container finished" podID="0b7dbc62-758b-4030-9149-7e77eceeea83" containerID="f65a49f19ae38c9363c27a48e8c574f3aca9c6056fcbda53657da57a0b659d94" exitCode=0 Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.086905 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zdmwv" event={"ID":"0b7dbc62-758b-4030-9149-7e77eceeea83","Type":"ContainerDied","Data":"f65a49f19ae38c9363c27a48e8c574f3aca9c6056fcbda53657da57a0b659d94"} Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.215805 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.295489 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-utilities\") pod \"0b7dbc62-758b-4030-9149-7e77eceeea83\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.295592 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5h2jf\" (UniqueName: \"kubernetes.io/projected/0b7dbc62-758b-4030-9149-7e77eceeea83-kube-api-access-5h2jf\") pod \"0b7dbc62-758b-4030-9149-7e77eceeea83\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.295910 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-catalog-content\") pod \"0b7dbc62-758b-4030-9149-7e77eceeea83\" (UID: \"0b7dbc62-758b-4030-9149-7e77eceeea83\") " Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.297875 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-utilities" (OuterVolumeSpecName: "utilities") pod "0b7dbc62-758b-4030-9149-7e77eceeea83" (UID: "0b7dbc62-758b-4030-9149-7e77eceeea83"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.303847 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b7dbc62-758b-4030-9149-7e77eceeea83-kube-api-access-5h2jf" (OuterVolumeSpecName: "kube-api-access-5h2jf") pod "0b7dbc62-758b-4030-9149-7e77eceeea83" (UID: "0b7dbc62-758b-4030-9149-7e77eceeea83"). InnerVolumeSpecName "kube-api-access-5h2jf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.394881 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b7dbc62-758b-4030-9149-7e77eceeea83" (UID: "0b7dbc62-758b-4030-9149-7e77eceeea83"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.399420 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.399504 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b7dbc62-758b-4030-9149-7e77eceeea83-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:39:31 crc kubenswrapper[4691]: I1124 08:39:31.399528 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5h2jf\" (UniqueName: \"kubernetes.io/projected/0b7dbc62-758b-4030-9149-7e77eceeea83-kube-api-access-5h2jf\") on node \"crc\" DevicePath \"\"" Nov 24 08:39:32 crc kubenswrapper[4691]: I1124 08:39:32.100256 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zdmwv" event={"ID":"0b7dbc62-758b-4030-9149-7e77eceeea83","Type":"ContainerDied","Data":"a24a4ccf0626531de2198f5aa90091145e346e261f4ea9ce3bfc7dea4ab2a37a"} Nov 24 08:39:32 crc kubenswrapper[4691]: I1124 08:39:32.100797 4691 scope.go:117] "RemoveContainer" containerID="f65a49f19ae38c9363c27a48e8c574f3aca9c6056fcbda53657da57a0b659d94" Nov 24 08:39:32 crc kubenswrapper[4691]: I1124 08:39:32.101033 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zdmwv" Nov 24 08:39:32 crc kubenswrapper[4691]: I1124 08:39:32.139401 4691 scope.go:117] "RemoveContainer" containerID="351fa0fc0f6c387c19e2bc5cfe7fb561d0c5ec1f94375cb517d87e8042291bdd" Nov 24 08:39:32 crc kubenswrapper[4691]: I1124 08:39:32.182591 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zdmwv"] Nov 24 08:39:32 crc kubenswrapper[4691]: I1124 08:39:32.191779 4691 scope.go:117] "RemoveContainer" containerID="8af5e22b55e78e02a0f5ed43095391eb67947e4a18a80a7ee880b10863f47739" Nov 24 08:39:32 crc kubenswrapper[4691]: I1124 08:39:32.198888 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zdmwv"] Nov 24 08:39:32 crc kubenswrapper[4691]: I1124 08:39:32.775233 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b7dbc62-758b-4030-9149-7e77eceeea83" path="/var/lib/kubelet/pods/0b7dbc62-758b-4030-9149-7e77eceeea83/volumes" Nov 24 08:41:51 crc kubenswrapper[4691]: I1124 08:41:51.089521 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:41:51 crc kubenswrapper[4691]: I1124 08:41:51.090090 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.124305 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-twkht"] Nov 24 08:41:53 crc kubenswrapper[4691]: E1124 08:41:53.125103 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b7dbc62-758b-4030-9149-7e77eceeea83" containerName="extract-content" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.125120 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b7dbc62-758b-4030-9149-7e77eceeea83" containerName="extract-content" Nov 24 08:41:53 crc kubenswrapper[4691]: E1124 08:41:53.125138 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b7dbc62-758b-4030-9149-7e77eceeea83" containerName="extract-utilities" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.125146 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b7dbc62-758b-4030-9149-7e77eceeea83" containerName="extract-utilities" Nov 24 08:41:53 crc kubenswrapper[4691]: E1124 08:41:53.125179 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b19b3af1-e299-46ab-b579-902390cb75a3" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.125189 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="b19b3af1-e299-46ab-b579-902390cb75a3" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 08:41:53 crc kubenswrapper[4691]: E1124 08:41:53.125206 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b7dbc62-758b-4030-9149-7e77eceeea83" containerName="registry-server" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.125214 4691 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="0b7dbc62-758b-4030-9149-7e77eceeea83" containerName="registry-server" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.125426 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="b19b3af1-e299-46ab-b579-902390cb75a3" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.125483 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b7dbc62-758b-4030-9149-7e77eceeea83" containerName="registry-server" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.127220 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-twkht" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.157726 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-twkht"] Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.185139 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-catalog-content\") pod \"community-operators-twkht\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " pod="openshift-marketplace/community-operators-twkht" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.185212 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtm6z\" (UniqueName: \"kubernetes.io/projected/37aeedd4-e874-40d0-aee0-c5e2016cabf4-kube-api-access-wtm6z\") pod \"community-operators-twkht\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " pod="openshift-marketplace/community-operators-twkht" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.185263 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-utilities\") pod \"community-operators-twkht\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " pod="openshift-marketplace/community-operators-twkht" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.287343 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-catalog-content\") pod \"community-operators-twkht\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " pod="openshift-marketplace/community-operators-twkht" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.287417 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtm6z\" (UniqueName: \"kubernetes.io/projected/37aeedd4-e874-40d0-aee0-c5e2016cabf4-kube-api-access-wtm6z\") pod \"community-operators-twkht\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " pod="openshift-marketplace/community-operators-twkht" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.287482 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-utilities\") pod \"community-operators-twkht\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " pod="openshift-marketplace/community-operators-twkht" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.288517 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-catalog-content\") pod \"community-operators-twkht\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " pod="openshift-marketplace/community-operators-twkht" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.288596 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-utilities\") pod \"community-operators-twkht\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " pod="openshift-marketplace/community-operators-twkht" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.315811 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtm6z\" (UniqueName: \"kubernetes.io/projected/37aeedd4-e874-40d0-aee0-c5e2016cabf4-kube-api-access-wtm6z\") pod \"community-operators-twkht\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " pod="openshift-marketplace/community-operators-twkht" Nov 24 08:41:53 crc kubenswrapper[4691]: I1124 08:41:53.453515 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-twkht" Nov 24 08:41:54 crc kubenswrapper[4691]: I1124 08:41:54.093954 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-twkht"] Nov 24 08:41:54 crc kubenswrapper[4691]: I1124 08:41:54.437550 4691 generic.go:334] "Generic (PLEG): container finished" podID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" containerID="6400384c0fd75bdafb30ba7ae81e6cfa76076a3ee26ece5a9a548e2ac19a42aa" exitCode=0 Nov 24 08:41:54 crc kubenswrapper[4691]: I1124 08:41:54.437665 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-twkht" event={"ID":"37aeedd4-e874-40d0-aee0-c5e2016cabf4","Type":"ContainerDied","Data":"6400384c0fd75bdafb30ba7ae81e6cfa76076a3ee26ece5a9a548e2ac19a42aa"} Nov 24 08:41:54 crc kubenswrapper[4691]: I1124 08:41:54.437989 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-twkht" event={"ID":"37aeedd4-e874-40d0-aee0-c5e2016cabf4","Type":"ContainerStarted","Data":"0c6ef9ff8500c199809d425150ae11559a83362abdaee1ed51ce0d9efdfa036c"} Nov 24 08:41:55 crc kubenswrapper[4691]: I1124 08:41:55.449570 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-twkht" event={"ID":"37aeedd4-e874-40d0-aee0-c5e2016cabf4","Type":"ContainerStarted","Data":"6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9"} Nov 24 08:41:56 crc kubenswrapper[4691]: I1124 08:41:56.460511 4691 generic.go:334] "Generic (PLEG): container finished" podID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" containerID="6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9" exitCode=0 Nov 24 08:41:56 crc kubenswrapper[4691]: I1124 08:41:56.460631 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-twkht" event={"ID":"37aeedd4-e874-40d0-aee0-c5e2016cabf4","Type":"ContainerDied","Data":"6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9"} Nov 24 08:41:57 crc kubenswrapper[4691]: I1124 08:41:57.470588 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-twkht" event={"ID":"37aeedd4-e874-40d0-aee0-c5e2016cabf4","Type":"ContainerStarted","Data":"34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527"} Nov 24 08:41:57 crc kubenswrapper[4691]: I1124 08:41:57.495439 
4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-twkht" podStartSLOduration=2.075891137 podStartE2EDuration="4.495416896s" podCreationTimestamp="2025-11-24 08:41:53 +0000 UTC" firstStartedPulling="2025-11-24 08:41:54.439309443 +0000 UTC m=+2676.438258692" lastFinishedPulling="2025-11-24 08:41:56.858835202 +0000 UTC m=+2678.857784451" observedRunningTime="2025-11-24 08:41:57.492653738 +0000 UTC m=+2679.491603017" watchObservedRunningTime="2025-11-24 08:41:57.495416896 +0000 UTC m=+2679.494366145" Nov 24 08:42:03 crc kubenswrapper[4691]: I1124 08:42:03.454648 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-twkht" Nov 24 08:42:03 crc kubenswrapper[4691]: I1124 08:42:03.455201 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-twkht" Nov 24 08:42:03 crc kubenswrapper[4691]: I1124 08:42:03.540758 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-twkht" Nov 24 08:42:03 crc kubenswrapper[4691]: I1124 08:42:03.597179 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-twkht" Nov 24 08:42:03 crc kubenswrapper[4691]: I1124 08:42:03.778956 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-twkht"] Nov 24 08:42:05 crc kubenswrapper[4691]: I1124 08:42:05.539319 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-twkht" podUID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" containerName="registry-server" containerID="cri-o://34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527" gracePeriod=2 Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.209629 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-twkht" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.274439 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtm6z\" (UniqueName: \"kubernetes.io/projected/37aeedd4-e874-40d0-aee0-c5e2016cabf4-kube-api-access-wtm6z\") pod \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.274607 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-catalog-content\") pod \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.274647 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-utilities\") pod \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\" (UID: \"37aeedd4-e874-40d0-aee0-c5e2016cabf4\") " Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.275473 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-utilities" (OuterVolumeSpecName: "utilities") pod "37aeedd4-e874-40d0-aee0-c5e2016cabf4" (UID: "37aeedd4-e874-40d0-aee0-c5e2016cabf4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.281284 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37aeedd4-e874-40d0-aee0-c5e2016cabf4-kube-api-access-wtm6z" (OuterVolumeSpecName: "kube-api-access-wtm6z") pod "37aeedd4-e874-40d0-aee0-c5e2016cabf4" (UID: "37aeedd4-e874-40d0-aee0-c5e2016cabf4"). InnerVolumeSpecName "kube-api-access-wtm6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.327489 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "37aeedd4-e874-40d0-aee0-c5e2016cabf4" (UID: "37aeedd4-e874-40d0-aee0-c5e2016cabf4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.377079 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wtm6z\" (UniqueName: \"kubernetes.io/projected/37aeedd4-e874-40d0-aee0-c5e2016cabf4-kube-api-access-wtm6z\") on node \"crc\" DevicePath \"\"" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.377120 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.377145 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37aeedd4-e874-40d0-aee0-c5e2016cabf4-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.551595 4691 generic.go:334] "Generic (PLEG): container finished" podID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" containerID="34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527" exitCode=0 Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.551661 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-twkht" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.551663 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-twkht" event={"ID":"37aeedd4-e874-40d0-aee0-c5e2016cabf4","Type":"ContainerDied","Data":"34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527"} Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.551873 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-twkht" event={"ID":"37aeedd4-e874-40d0-aee0-c5e2016cabf4","Type":"ContainerDied","Data":"0c6ef9ff8500c199809d425150ae11559a83362abdaee1ed51ce0d9efdfa036c"} Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.551915 4691 scope.go:117] "RemoveContainer" containerID="34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.573984 4691 scope.go:117] "RemoveContainer" containerID="6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.586508 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-twkht"] Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.596651 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-twkht"] Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.618115 4691 scope.go:117] "RemoveContainer" containerID="6400384c0fd75bdafb30ba7ae81e6cfa76076a3ee26ece5a9a548e2ac19a42aa" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.650628 4691 scope.go:117] "RemoveContainer" containerID="34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527" Nov 24 08:42:06 crc kubenswrapper[4691]: E1124 08:42:06.652873 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527\": container with ID starting with 34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527 not found: ID does not exist" containerID="34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.652912 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527"} err="failed to get container status \"34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527\": rpc error: code = NotFound desc = could not find container \"34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527\": container with ID starting with 34b8500f2296c3489e44b94bb6197161825a914852c9be8561bdfa90cc2b8527 not found: ID does not exist" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.652933 4691 scope.go:117] "RemoveContainer" containerID="6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9" Nov 24 08:42:06 crc kubenswrapper[4691]: E1124 08:42:06.653433 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9\": container with ID starting with 6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9 not found: ID does not exist" containerID="6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.653513 4691 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9"} err="failed to get container status \"6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9\": rpc error: code = NotFound desc = could not find container \"6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9\": container with ID starting with 6404f30a3114d5fd0da4d0dd3b2df37a02ed8e8bb6a96a54f23ac463291904a9 not found: ID does not exist" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.653567 4691 scope.go:117] "RemoveContainer" containerID="6400384c0fd75bdafb30ba7ae81e6cfa76076a3ee26ece5a9a548e2ac19a42aa" Nov 24 08:42:06 crc kubenswrapper[4691]: E1124 08:42:06.653886 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6400384c0fd75bdafb30ba7ae81e6cfa76076a3ee26ece5a9a548e2ac19a42aa\": container with ID starting with 6400384c0fd75bdafb30ba7ae81e6cfa76076a3ee26ece5a9a548e2ac19a42aa not found: ID does not exist" containerID="6400384c0fd75bdafb30ba7ae81e6cfa76076a3ee26ece5a9a548e2ac19a42aa" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.653921 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6400384c0fd75bdafb30ba7ae81e6cfa76076a3ee26ece5a9a548e2ac19a42aa"} err="failed to get container status \"6400384c0fd75bdafb30ba7ae81e6cfa76076a3ee26ece5a9a548e2ac19a42aa\": rpc error: code = NotFound desc = could not find container \"6400384c0fd75bdafb30ba7ae81e6cfa76076a3ee26ece5a9a548e2ac19a42aa\": container with ID starting with 6400384c0fd75bdafb30ba7ae81e6cfa76076a3ee26ece5a9a548e2ac19a42aa not found: ID does not exist" Nov 24 08:42:06 crc kubenswrapper[4691]: I1124 08:42:06.771095 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" path="/var/lib/kubelet/pods/37aeedd4-e874-40d0-aee0-c5e2016cabf4/volumes" Nov 24 08:42:21 crc kubenswrapper[4691]: I1124 08:42:21.089603 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:42:21 crc kubenswrapper[4691]: I1124 08:42:21.090186 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:42:51 crc kubenswrapper[4691]: I1124 08:42:51.089166 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:42:51 crc kubenswrapper[4691]: I1124 08:42:51.090098 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:42:51 crc kubenswrapper[4691]: I1124 
08:42:51.090153 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:42:51 crc kubenswrapper[4691]: I1124 08:42:51.091071 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f6d92258bfa625e20cc52564ae622942d4685b3155e92eab89a59d15f2bcf57b"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 08:42:51 crc kubenswrapper[4691]: I1124 08:42:51.091140 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://f6d92258bfa625e20cc52564ae622942d4685b3155e92eab89a59d15f2bcf57b" gracePeriod=600 Nov 24 08:42:51 crc kubenswrapper[4691]: I1124 08:42:51.938852 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="f6d92258bfa625e20cc52564ae622942d4685b3155e92eab89a59d15f2bcf57b" exitCode=0 Nov 24 08:42:51 crc kubenswrapper[4691]: I1124 08:42:51.938909 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"f6d92258bfa625e20cc52564ae622942d4685b3155e92eab89a59d15f2bcf57b"} Nov 24 08:42:51 crc kubenswrapper[4691]: I1124 08:42:51.939389 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"} Nov 24 08:42:51 crc kubenswrapper[4691]: I1124 08:42:51.939411 4691 scope.go:117] "RemoveContainer" containerID="e7b637f4980bbb9ac5b384cec7b1f94c6dc365ccdf664c7e8d7855d6a60bff20" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.458724 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8lzxm"] Nov 24 08:43:00 crc kubenswrapper[4691]: E1124 08:43:00.461106 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" containerName="extract-utilities" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.461215 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" containerName="extract-utilities" Nov 24 08:43:00 crc kubenswrapper[4691]: E1124 08:43:00.461296 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" containerName="extract-content" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.461367 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" containerName="extract-content" Nov 24 08:43:00 crc kubenswrapper[4691]: E1124 08:43:00.461472 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" containerName="registry-server" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.461826 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" containerName="registry-server" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.462128 4691 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="37aeedd4-e874-40d0-aee0-c5e2016cabf4" containerName="registry-server" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.463802 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.479652 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8lzxm"] Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.620688 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjkjw\" (UniqueName: \"kubernetes.io/projected/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-kube-api-access-pjkjw\") pod \"certified-operators-8lzxm\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") " pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.620860 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-utilities\") pod \"certified-operators-8lzxm\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") " pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.621280 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-catalog-content\") pod \"certified-operators-8lzxm\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") " pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.722864 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-catalog-content\") pod \"certified-operators-8lzxm\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") " pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.722961 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjkjw\" (UniqueName: \"kubernetes.io/projected/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-kube-api-access-pjkjw\") pod \"certified-operators-8lzxm\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") " pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.723002 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-utilities\") pod \"certified-operators-8lzxm\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") " pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.723484 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-catalog-content\") pod \"certified-operators-8lzxm\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") " pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.723541 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-utilities\") pod 
\"certified-operators-8lzxm\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") " pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.740917 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjkjw\" (UniqueName: \"kubernetes.io/projected/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-kube-api-access-pjkjw\") pod \"certified-operators-8lzxm\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") " pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:00 crc kubenswrapper[4691]: I1124 08:43:00.788051 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:01 crc kubenswrapper[4691]: I1124 08:43:01.328006 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8lzxm"] Nov 24 08:43:02 crc kubenswrapper[4691]: I1124 08:43:02.155167 4691 generic.go:334] "Generic (PLEG): container finished" podID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" containerID="3b3779592ad955b7bcfbbdd54ef2a34aa62106d74a3aa9744adf3c964e50ed04" exitCode=0 Nov 24 08:43:02 crc kubenswrapper[4691]: I1124 08:43:02.155294 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8lzxm" event={"ID":"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670","Type":"ContainerDied","Data":"3b3779592ad955b7bcfbbdd54ef2a34aa62106d74a3aa9744adf3c964e50ed04"} Nov 24 08:43:02 crc kubenswrapper[4691]: I1124 08:43:02.155569 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8lzxm" event={"ID":"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670","Type":"ContainerStarted","Data":"0c711b8575a01f9795104ffc5a8e07667d32ab342dae48780f54e975015c7e4c"} Nov 24 08:43:02 crc kubenswrapper[4691]: I1124 08:43:02.853012 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tlggw"] Nov 24 08:43:02 crc kubenswrapper[4691]: I1124 08:43:02.858545 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:02 crc kubenswrapper[4691]: I1124 08:43:02.892622 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tlggw"] Nov 24 08:43:02 crc kubenswrapper[4691]: I1124 08:43:02.907228 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-utilities\") pod \"redhat-marketplace-tlggw\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:02 crc kubenswrapper[4691]: I1124 08:43:02.907716 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-catalog-content\") pod \"redhat-marketplace-tlggw\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:02 crc kubenswrapper[4691]: I1124 08:43:02.907810 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfrc8\" (UniqueName: \"kubernetes.io/projected/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-kube-api-access-vfrc8\") pod \"redhat-marketplace-tlggw\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:03 crc kubenswrapper[4691]: I1124 08:43:03.009060 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-catalog-content\") pod \"redhat-marketplace-tlggw\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:03 crc kubenswrapper[4691]: I1124 08:43:03.009132 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfrc8\" (UniqueName: \"kubernetes.io/projected/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-kube-api-access-vfrc8\") pod \"redhat-marketplace-tlggw\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:03 crc kubenswrapper[4691]: I1124 08:43:03.009218 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-utilities\") pod \"redhat-marketplace-tlggw\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:03 crc kubenswrapper[4691]: I1124 08:43:03.010247 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-catalog-content\") pod \"redhat-marketplace-tlggw\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:03 crc kubenswrapper[4691]: I1124 08:43:03.010467 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-utilities\") pod \"redhat-marketplace-tlggw\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:03 crc kubenswrapper[4691]: I1124 08:43:03.039376 4691 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-vfrc8\" (UniqueName: \"kubernetes.io/projected/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-kube-api-access-vfrc8\") pod \"redhat-marketplace-tlggw\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:03 crc kubenswrapper[4691]: I1124 08:43:03.209932 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:03 crc kubenswrapper[4691]: I1124 08:43:03.702942 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tlggw"] Nov 24 08:43:04 crc kubenswrapper[4691]: I1124 08:43:04.177353 4691 generic.go:334] "Generic (PLEG): container finished" podID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" containerID="cb0eaa0bf06450eb4cfc24a7dd792c998dc930edd47cf8f589f59aefd0837b2d" exitCode=0 Nov 24 08:43:04 crc kubenswrapper[4691]: I1124 08:43:04.177413 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8lzxm" event={"ID":"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670","Type":"ContainerDied","Data":"cb0eaa0bf06450eb4cfc24a7dd792c998dc930edd47cf8f589f59aefd0837b2d"} Nov 24 08:43:04 crc kubenswrapper[4691]: I1124 08:43:04.180874 4691 generic.go:334] "Generic (PLEG): container finished" podID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerID="49a757d11fda77caa624498325ad9116f76193a645165b5141457dc03cef981d" exitCode=0 Nov 24 08:43:04 crc kubenswrapper[4691]: I1124 08:43:04.180909 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tlggw" event={"ID":"79eeb3e9-58cd-4715-a9df-3d1e426ed5da","Type":"ContainerDied","Data":"49a757d11fda77caa624498325ad9116f76193a645165b5141457dc03cef981d"} Nov 24 08:43:04 crc kubenswrapper[4691]: I1124 08:43:04.180932 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tlggw" event={"ID":"79eeb3e9-58cd-4715-a9df-3d1e426ed5da","Type":"ContainerStarted","Data":"964204cae7cb2759358255fcb812ce2ac2c1000d655e926052b2bf00a7cb1f71"} Nov 24 08:43:05 crc kubenswrapper[4691]: I1124 08:43:05.190652 4691 generic.go:334] "Generic (PLEG): container finished" podID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerID="1837bd8cfbe4d7dd9905252cafb7477fa5f859222c274447a13b6f8ae37dca21" exitCode=0 Nov 24 08:43:05 crc kubenswrapper[4691]: I1124 08:43:05.190742 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tlggw" event={"ID":"79eeb3e9-58cd-4715-a9df-3d1e426ed5da","Type":"ContainerDied","Data":"1837bd8cfbe4d7dd9905252cafb7477fa5f859222c274447a13b6f8ae37dca21"} Nov 24 08:43:05 crc kubenswrapper[4691]: I1124 08:43:05.195420 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8lzxm" event={"ID":"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670","Type":"ContainerStarted","Data":"c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187"} Nov 24 08:43:05 crc kubenswrapper[4691]: I1124 08:43:05.236072 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8lzxm" podStartSLOduration=2.84612338 podStartE2EDuration="5.236050721s" podCreationTimestamp="2025-11-24 08:43:00 +0000 UTC" firstStartedPulling="2025-11-24 08:43:02.159516403 +0000 UTC m=+2744.158465692" lastFinishedPulling="2025-11-24 08:43:04.549443784 +0000 UTC m=+2746.548393033" observedRunningTime="2025-11-24 08:43:05.230271787 +0000 UTC 
m=+2747.229221036" watchObservedRunningTime="2025-11-24 08:43:05.236050721 +0000 UTC m=+2747.234999970" Nov 24 08:43:06 crc kubenswrapper[4691]: I1124 08:43:06.204890 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tlggw" event={"ID":"79eeb3e9-58cd-4715-a9df-3d1e426ed5da","Type":"ContainerStarted","Data":"4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154"} Nov 24 08:43:06 crc kubenswrapper[4691]: I1124 08:43:06.231012 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tlggw" podStartSLOduration=2.798101731 podStartE2EDuration="4.230990787s" podCreationTimestamp="2025-11-24 08:43:02 +0000 UTC" firstStartedPulling="2025-11-24 08:43:04.182386686 +0000 UTC m=+2746.181335935" lastFinishedPulling="2025-11-24 08:43:05.615275742 +0000 UTC m=+2747.614224991" observedRunningTime="2025-11-24 08:43:06.22402125 +0000 UTC m=+2748.222970499" watchObservedRunningTime="2025-11-24 08:43:06.230990787 +0000 UTC m=+2748.229940036" Nov 24 08:43:10 crc kubenswrapper[4691]: I1124 08:43:10.788523 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:10 crc kubenswrapper[4691]: I1124 08:43:10.789067 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:10 crc kubenswrapper[4691]: I1124 08:43:10.836556 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:11 crc kubenswrapper[4691]: I1124 08:43:11.295535 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8lzxm" Nov 24 08:43:11 crc kubenswrapper[4691]: I1124 08:43:11.355117 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8lzxm"] Nov 24 08:43:13 crc kubenswrapper[4691]: I1124 08:43:13.210366 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:13 crc kubenswrapper[4691]: I1124 08:43:13.210468 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:13 crc kubenswrapper[4691]: I1124 08:43:13.257547 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:13 crc kubenswrapper[4691]: I1124 08:43:13.274191 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8lzxm" podUID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" containerName="registry-server" containerID="cri-o://c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187" gracePeriod=2 Nov 24 08:43:13 crc kubenswrapper[4691]: I1124 08:43:13.329194 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:13 crc kubenswrapper[4691]: I1124 08:43:13.929597 4691 util.go:48] "No ready sandbox for pod can be found. 
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.012191 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjkjw\" (UniqueName: \"kubernetes.io/projected/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-kube-api-access-pjkjw\") pod \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") "
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.012378 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-utilities\") pod \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") "
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.012419 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-catalog-content\") pod \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\" (UID: \"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670\") "
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.013408 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-utilities" (OuterVolumeSpecName: "utilities") pod "f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" (UID: "f5b32bf8-7ba9-47fb-9f3a-3318bdc10670"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.022902 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-kube-api-access-pjkjw" (OuterVolumeSpecName: "kube-api-access-pjkjw") pod "f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" (UID: "f5b32bf8-7ba9-47fb-9f3a-3318bdc10670"). InnerVolumeSpecName "kube-api-access-pjkjw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.071561 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" (UID: "f5b32bf8-7ba9-47fb-9f3a-3318bdc10670"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.127060 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjkjw\" (UniqueName: \"kubernetes.io/projected/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-kube-api-access-pjkjw\") on node \"crc\" DevicePath \"\""
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.127108 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.127121 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.284937 4691 generic.go:334] "Generic (PLEG): container finished" podID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" containerID="c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187" exitCode=0
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.284982 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8lzxm"
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.285007 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8lzxm" event={"ID":"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670","Type":"ContainerDied","Data":"c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187"}
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.285064 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8lzxm" event={"ID":"f5b32bf8-7ba9-47fb-9f3a-3318bdc10670","Type":"ContainerDied","Data":"0c711b8575a01f9795104ffc5a8e07667d32ab342dae48780f54e975015c7e4c"}
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.285085 4691 scope.go:117] "RemoveContainer" containerID="c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187"
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.329805 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8lzxm"]
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.330233 4691 scope.go:117] "RemoveContainer" containerID="cb0eaa0bf06450eb4cfc24a7dd792c998dc930edd47cf8f589f59aefd0837b2d"
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.338241 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8lzxm"]
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.352133 4691 scope.go:117] "RemoveContainer" containerID="3b3779592ad955b7bcfbbdd54ef2a34aa62106d74a3aa9744adf3c964e50ed04"
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.420872 4691 scope.go:117] "RemoveContainer" containerID="c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187"
Nov 24 08:43:14 crc kubenswrapper[4691]: E1124 08:43:14.421342 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187\": container with ID starting with c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187 not found: ID does not exist" containerID="c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187"
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.421383 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187"} err="failed to get container status \"c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187\": rpc error: code = NotFound desc = could not find container \"c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187\": container with ID starting with c6b1d6db0a74145f80b1ea67de1f9baace7743ab0222e896fc80f40a8c17d187 not found: ID does not exist"
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.421409 4691 scope.go:117] "RemoveContainer" containerID="cb0eaa0bf06450eb4cfc24a7dd792c998dc930edd47cf8f589f59aefd0837b2d"
Nov 24 08:43:14 crc kubenswrapper[4691]: E1124 08:43:14.421774 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb0eaa0bf06450eb4cfc24a7dd792c998dc930edd47cf8f589f59aefd0837b2d\": container with ID starting with cb0eaa0bf06450eb4cfc24a7dd792c998dc930edd47cf8f589f59aefd0837b2d not found: ID does not exist" containerID="cb0eaa0bf06450eb4cfc24a7dd792c998dc930edd47cf8f589f59aefd0837b2d"
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.421804 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb0eaa0bf06450eb4cfc24a7dd792c998dc930edd47cf8f589f59aefd0837b2d"} err="failed to get container status \"cb0eaa0bf06450eb4cfc24a7dd792c998dc930edd47cf8f589f59aefd0837b2d\": rpc error: code = NotFound desc = could not find container \"cb0eaa0bf06450eb4cfc24a7dd792c998dc930edd47cf8f589f59aefd0837b2d\": container with ID starting with cb0eaa0bf06450eb4cfc24a7dd792c998dc930edd47cf8f589f59aefd0837b2d not found: ID does not exist"
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.421823 4691 scope.go:117] "RemoveContainer" containerID="3b3779592ad955b7bcfbbdd54ef2a34aa62106d74a3aa9744adf3c964e50ed04"
Nov 24 08:43:14 crc kubenswrapper[4691]: E1124 08:43:14.422126 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b3779592ad955b7bcfbbdd54ef2a34aa62106d74a3aa9744adf3c964e50ed04\": container with ID starting with 3b3779592ad955b7bcfbbdd54ef2a34aa62106d74a3aa9744adf3c964e50ed04 not found: ID does not exist" containerID="3b3779592ad955b7bcfbbdd54ef2a34aa62106d74a3aa9744adf3c964e50ed04"
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.422158 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b3779592ad955b7bcfbbdd54ef2a34aa62106d74a3aa9744adf3c964e50ed04"} err="failed to get container status \"3b3779592ad955b7bcfbbdd54ef2a34aa62106d74a3aa9744adf3c964e50ed04\": rpc error: code = NotFound desc = could not find container \"3b3779592ad955b7bcfbbdd54ef2a34aa62106d74a3aa9744adf3c964e50ed04\": container with ID starting with 3b3779592ad955b7bcfbbdd54ef2a34aa62106d74a3aa9744adf3c964e50ed04 not found: ID does not exist"
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.771147 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" path="/var/lib/kubelet/pods/f5b32bf8-7ba9-47fb-9f3a-3318bdc10670/volumes"
Nov 24 08:43:14 crc kubenswrapper[4691]: I1124 08:43:14.882656 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tlggw"]
Nov 24 08:43:15 crc kubenswrapper[4691]: I1124 08:43:15.297000 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tlggw" podUID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerName="registry-server" containerID="cri-o://4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154" gracePeriod=2
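The E1124 "ContainerStatus from runtime service failed ... NotFound" triplets above are a benign race: by the time the kubelet re-queries CRI-O for each container it wants to delete, the containers are already gone along with the pod sandbox, so the runtime answers with gRPC NotFound and the DeleteContainer path merely logs it. A minimal sketch of tolerating that error code; removeIfPresent is a hypothetical helper, not kubelet code:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeIfPresent treats "already gone" as success; remove stands in
    // for a CRI RemoveContainer call.
    func removeIfPresent(remove func() error) error {
        err := remove()
        if status.Code(err) == codes.NotFound {
            return nil // container was deleted with the sandbox: benign
        }
        return err
    }

    func main() {
        gone := func() error {
            return status.Error(codes.NotFound, "could not find container")
        }
        fmt.Println(removeIfPresent(gone)) // <nil>
    }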
grace period" pod="openshift-marketplace/redhat-marketplace-tlggw" podUID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerName="registry-server" containerID="cri-o://4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154" gracePeriod=2 Nov 24 08:43:15 crc kubenswrapper[4691]: I1124 08:43:15.925723 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.062026 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfrc8\" (UniqueName: \"kubernetes.io/projected/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-kube-api-access-vfrc8\") pod \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.062206 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-utilities\") pod \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.062238 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-catalog-content\") pod \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\" (UID: \"79eeb3e9-58cd-4715-a9df-3d1e426ed5da\") " Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.063663 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-utilities" (OuterVolumeSpecName: "utilities") pod "79eeb3e9-58cd-4715-a9df-3d1e426ed5da" (UID: "79eeb3e9-58cd-4715-a9df-3d1e426ed5da"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.071059 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-kube-api-access-vfrc8" (OuterVolumeSpecName: "kube-api-access-vfrc8") pod "79eeb3e9-58cd-4715-a9df-3d1e426ed5da" (UID: "79eeb3e9-58cd-4715-a9df-3d1e426ed5da"). InnerVolumeSpecName "kube-api-access-vfrc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.089473 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "79eeb3e9-58cd-4715-a9df-3d1e426ed5da" (UID: "79eeb3e9-58cd-4715-a9df-3d1e426ed5da"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.164870 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfrc8\" (UniqueName: \"kubernetes.io/projected/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-kube-api-access-vfrc8\") on node \"crc\" DevicePath \"\"" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.164904 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.164916 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79eeb3e9-58cd-4715-a9df-3d1e426ed5da-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.305891 4691 generic.go:334] "Generic (PLEG): container finished" podID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerID="4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154" exitCode=0 Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.305959 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tlggw" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.307328 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tlggw" event={"ID":"79eeb3e9-58cd-4715-a9df-3d1e426ed5da","Type":"ContainerDied","Data":"4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154"} Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.307602 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tlggw" event={"ID":"79eeb3e9-58cd-4715-a9df-3d1e426ed5da","Type":"ContainerDied","Data":"964204cae7cb2759358255fcb812ce2ac2c1000d655e926052b2bf00a7cb1f71"} Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.307734 4691 scope.go:117] "RemoveContainer" containerID="4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.340217 4691 scope.go:117] "RemoveContainer" containerID="1837bd8cfbe4d7dd9905252cafb7477fa5f859222c274447a13b6f8ae37dca21" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.347642 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tlggw"] Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.361333 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tlggw"] Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.367854 4691 scope.go:117] "RemoveContainer" containerID="49a757d11fda77caa624498325ad9116f76193a645165b5141457dc03cef981d" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.405979 4691 scope.go:117] "RemoveContainer" containerID="4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154" Nov 24 08:43:16 crc kubenswrapper[4691]: E1124 08:43:16.406388 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154\": container with ID starting with 4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154 not found: ID does not exist" containerID="4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.406438 4691 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154"} err="failed to get container status \"4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154\": rpc error: code = NotFound desc = could not find container \"4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154\": container with ID starting with 4db8a8e39a213b210007c68b3d6c8c2bf67aa530e29d4ea586bf470055d82154 not found: ID does not exist" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.406484 4691 scope.go:117] "RemoveContainer" containerID="1837bd8cfbe4d7dd9905252cafb7477fa5f859222c274447a13b6f8ae37dca21" Nov 24 08:43:16 crc kubenswrapper[4691]: E1124 08:43:16.406844 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1837bd8cfbe4d7dd9905252cafb7477fa5f859222c274447a13b6f8ae37dca21\": container with ID starting with 1837bd8cfbe4d7dd9905252cafb7477fa5f859222c274447a13b6f8ae37dca21 not found: ID does not exist" containerID="1837bd8cfbe4d7dd9905252cafb7477fa5f859222c274447a13b6f8ae37dca21" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.406880 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1837bd8cfbe4d7dd9905252cafb7477fa5f859222c274447a13b6f8ae37dca21"} err="failed to get container status \"1837bd8cfbe4d7dd9905252cafb7477fa5f859222c274447a13b6f8ae37dca21\": rpc error: code = NotFound desc = could not find container \"1837bd8cfbe4d7dd9905252cafb7477fa5f859222c274447a13b6f8ae37dca21\": container with ID starting with 1837bd8cfbe4d7dd9905252cafb7477fa5f859222c274447a13b6f8ae37dca21 not found: ID does not exist" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.406902 4691 scope.go:117] "RemoveContainer" containerID="49a757d11fda77caa624498325ad9116f76193a645165b5141457dc03cef981d" Nov 24 08:43:16 crc kubenswrapper[4691]: E1124 08:43:16.407157 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49a757d11fda77caa624498325ad9116f76193a645165b5141457dc03cef981d\": container with ID starting with 49a757d11fda77caa624498325ad9116f76193a645165b5141457dc03cef981d not found: ID does not exist" containerID="49a757d11fda77caa624498325ad9116f76193a645165b5141457dc03cef981d" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.407190 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49a757d11fda77caa624498325ad9116f76193a645165b5141457dc03cef981d"} err="failed to get container status \"49a757d11fda77caa624498325ad9116f76193a645165b5141457dc03cef981d\": rpc error: code = NotFound desc = could not find container \"49a757d11fda77caa624498325ad9116f76193a645165b5141457dc03cef981d\": container with ID starting with 49a757d11fda77caa624498325ad9116f76193a645165b5141457dc03cef981d not found: ID does not exist" Nov 24 08:43:16 crc kubenswrapper[4691]: I1124 08:43:16.776932 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" path="/var/lib/kubelet/pods/79eeb3e9-58cd-4715-a9df-3d1e426ed5da/volumes" Nov 24 08:44:51 crc kubenswrapper[4691]: I1124 08:44:51.089776 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
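The failing endpoint in the Liveness entries above is http://127.0.0.1:8798/health, and the failures arrive roughly 30 seconds apart (08:44:51, 08:45:21, 08:45:51). A sketch of a probe with that shape, built with the k8s.io/api types; the host, port, and path come from the log output, while the period and threshold are assumptions read off the log spacing, not the machine-config-daemon's actual manifest:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        probe := corev1.Probe{
            ProbeHandler: corev1.ProbeHandler{
                HTTPGet: &corev1.HTTPGetAction{
                    Host: "127.0.0.1", // from the log's failure output
                    Path: "/health",
                    Port: intstr.FromInt(8798),
                },
            },
            PeriodSeconds:    30, // assumed from the ~30s failure spacing
            FailureThreshold: 3,  // assumed; three failures precede the restart
        }
        fmt.Printf("%+v\n", probe)
    }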
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.150816 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"]
Nov 24 08:45:00 crc kubenswrapper[4691]: E1124 08:45:00.151747 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" containerName="extract-content"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.151766 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" containerName="extract-content"
Nov 24 08:45:00 crc kubenswrapper[4691]: E1124 08:45:00.151781 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerName="extract-content"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.151790 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerName="extract-content"
Nov 24 08:45:00 crc kubenswrapper[4691]: E1124 08:45:00.151820 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerName="registry-server"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.151828 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerName="registry-server"
Nov 24 08:45:00 crc kubenswrapper[4691]: E1124 08:45:00.151847 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" containerName="extract-utilities"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.151855 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" containerName="extract-utilities"
Nov 24 08:45:00 crc kubenswrapper[4691]: E1124 08:45:00.151866 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" containerName="registry-server"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.151873 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" containerName="registry-server"
Nov 24 08:45:00 crc kubenswrapper[4691]: E1124 08:45:00.151885 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerName="extract-utilities"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.151892 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerName="extract-utilities"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.152111 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="79eeb3e9-58cd-4715-a9df-3d1e426ed5da" containerName="registry-server"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.152126 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5b32bf8-7ba9-47fb-9f3a-3318bdc10670" containerName="registry-server"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.152922 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.157011 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.158360 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.160260 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"]
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.332571 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/181c94c0-135e-4fdf-ab39-fd0326a29b74-config-volume\") pod \"collect-profiles-29399565-77j4v\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.332837 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/181c94c0-135e-4fdf-ab39-fd0326a29b74-secret-volume\") pod \"collect-profiles-29399565-77j4v\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.332872 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxdww\" (UniqueName: \"kubernetes.io/projected/181c94c0-135e-4fdf-ab39-fd0326a29b74-kube-api-access-hxdww\") pod \"collect-profiles-29399565-77j4v\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.435787 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/181c94c0-135e-4fdf-ab39-fd0326a29b74-config-volume\") pod \"collect-profiles-29399565-77j4v\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.435858 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/181c94c0-135e-4fdf-ab39-fd0326a29b74-secret-volume\") pod \"collect-profiles-29399565-77j4v\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.435898 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxdww\" (UniqueName: \"kubernetes.io/projected/181c94c0-135e-4fdf-ab39-fd0326a29b74-kube-api-access-hxdww\") pod \"collect-profiles-29399565-77j4v\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.437569 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/181c94c0-135e-4fdf-ab39-fd0326a29b74-config-volume\") pod \"collect-profiles-29399565-77j4v\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.455953 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/181c94c0-135e-4fdf-ab39-fd0326a29b74-secret-volume\") pod \"collect-profiles-29399565-77j4v\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.460247 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxdww\" (UniqueName: \"kubernetes.io/projected/181c94c0-135e-4fdf-ab39-fd0326a29b74-kube-api-access-hxdww\") pod \"collect-profiles-29399565-77j4v\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.491388 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:00 crc kubenswrapper[4691]: I1124 08:45:00.942649 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"]
Nov 24 08:45:01 crc kubenswrapper[4691]: I1124 08:45:01.267154 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v" event={"ID":"181c94c0-135e-4fdf-ab39-fd0326a29b74","Type":"ContainerStarted","Data":"b737ee701dbd48044a7f68c307e1ec912aab41467076cbbd5c627b2ab059476b"}
Nov 24 08:45:01 crc kubenswrapper[4691]: I1124 08:45:01.267544 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v" event={"ID":"181c94c0-135e-4fdf-ab39-fd0326a29b74","Type":"ContainerStarted","Data":"c63b1d203e430878b49817f2483708a8c4b1b560d7d323a04f9ce8c40eb207da"}
Nov 24 08:45:01 crc kubenswrapper[4691]: I1124 08:45:01.288740 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v" podStartSLOduration=1.288720826 podStartE2EDuration="1.288720826s" podCreationTimestamp="2025-11-24 08:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 08:45:01.28707225 +0000 UTC m=+2863.286021499" watchObservedRunningTime="2025-11-24 08:45:01.288720826 +0000 UTC m=+2863.287670075"
Nov 24 08:45:02 crc kubenswrapper[4691]: I1124 08:45:02.282745 4691 generic.go:334] "Generic (PLEG): container finished" podID="181c94c0-135e-4fdf-ab39-fd0326a29b74" containerID="b737ee701dbd48044a7f68c307e1ec912aab41467076cbbd5c627b2ab059476b" exitCode=0
Nov 24 08:45:02 crc kubenswrapper[4691]: I1124 08:45:02.282839 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v" event={"ID":"181c94c0-135e-4fdf-ab39-fd0326a29b74","Type":"ContainerDied","Data":"b737ee701dbd48044a7f68c307e1ec912aab41467076cbbd5c627b2ab059476b"}
Nov 24 08:45:03 crc kubenswrapper[4691]: I1124 08:45:03.772676 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:03 crc kubenswrapper[4691]: I1124 08:45:03.902863 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/181c94c0-135e-4fdf-ab39-fd0326a29b74-secret-volume\") pod \"181c94c0-135e-4fdf-ab39-fd0326a29b74\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") "
Nov 24 08:45:03 crc kubenswrapper[4691]: I1124 08:45:03.903976 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/181c94c0-135e-4fdf-ab39-fd0326a29b74-config-volume\") pod \"181c94c0-135e-4fdf-ab39-fd0326a29b74\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") "
Nov 24 08:45:03 crc kubenswrapper[4691]: I1124 08:45:03.904350 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxdww\" (UniqueName: \"kubernetes.io/projected/181c94c0-135e-4fdf-ab39-fd0326a29b74-kube-api-access-hxdww\") pod \"181c94c0-135e-4fdf-ab39-fd0326a29b74\" (UID: \"181c94c0-135e-4fdf-ab39-fd0326a29b74\") "
Nov 24 08:45:03 crc kubenswrapper[4691]: I1124 08:45:03.904497 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/181c94c0-135e-4fdf-ab39-fd0326a29b74-config-volume" (OuterVolumeSpecName: "config-volume") pod "181c94c0-135e-4fdf-ab39-fd0326a29b74" (UID: "181c94c0-135e-4fdf-ab39-fd0326a29b74"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 24 08:45:03 crc kubenswrapper[4691]: I1124 08:45:03.905938 4691 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/181c94c0-135e-4fdf-ab39-fd0326a29b74-config-volume\") on node \"crc\" DevicePath \"\""
Nov 24 08:45:03 crc kubenswrapper[4691]: I1124 08:45:03.910124 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/181c94c0-135e-4fdf-ab39-fd0326a29b74-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "181c94c0-135e-4fdf-ab39-fd0326a29b74" (UID: "181c94c0-135e-4fdf-ab39-fd0326a29b74"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 24 08:45:03 crc kubenswrapper[4691]: I1124 08:45:03.921677 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/181c94c0-135e-4fdf-ab39-fd0326a29b74-kube-api-access-hxdww" (OuterVolumeSpecName: "kube-api-access-hxdww") pod "181c94c0-135e-4fdf-ab39-fd0326a29b74" (UID: "181c94c0-135e-4fdf-ab39-fd0326a29b74"). InnerVolumeSpecName "kube-api-access-hxdww". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 08:45:04 crc kubenswrapper[4691]: I1124 08:45:04.007335 4691 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/181c94c0-135e-4fdf-ab39-fd0326a29b74-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 24 08:45:04 crc kubenswrapper[4691]: I1124 08:45:04.007376 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxdww\" (UniqueName: \"kubernetes.io/projected/181c94c0-135e-4fdf-ab39-fd0326a29b74-kube-api-access-hxdww\") on node \"crc\" DevicePath \"\""
Nov 24 08:45:04 crc kubenswrapper[4691]: I1124 08:45:04.300590 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v" event={"ID":"181c94c0-135e-4fdf-ab39-fd0326a29b74","Type":"ContainerDied","Data":"c63b1d203e430878b49817f2483708a8c4b1b560d7d323a04f9ce8c40eb207da"}
Nov 24 08:45:04 crc kubenswrapper[4691]: I1124 08:45:04.300643 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c63b1d203e430878b49817f2483708a8c4b1b560d7d323a04f9ce8c40eb207da"
Nov 24 08:45:04 crc kubenswrapper[4691]: I1124 08:45:04.300643 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"
Nov 24 08:45:04 crc kubenswrapper[4691]: I1124 08:45:04.384603 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49"]
Nov 24 08:45:04 crc kubenswrapper[4691]: I1124 08:45:04.386016 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399520-x8l49"]
Nov 24 08:45:04 crc kubenswrapper[4691]: I1124 08:45:04.775487 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72c587e8-bc91-4e8f-a545-e01d47139d1d" path="/var/lib/kubelet/pods/72c587e8-bc91-4e8f-a545-e01d47139d1d/volumes"
Nov 24 08:45:21 crc kubenswrapper[4691]: I1124 08:45:21.089271 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 08:45:21 crc kubenswrapper[4691]: I1124 08:45:21.090192 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 08:45:23 crc kubenswrapper[4691]: I1124 08:45:23.879688 4691 scope.go:117] "RemoveContainer" containerID="b66da3efaff2f4a42d58550587f53890f6a61eb998bd1c65fb6cf3a611c0a8c0"
Nov 24 08:45:51 crc kubenswrapper[4691]: I1124 08:45:51.089962 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 24 08:45:51 crc kubenswrapper[4691]: I1124 08:45:51.090546 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 24 08:45:51 crc kubenswrapper[4691]: I1124 08:45:51.090584 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc"
Nov 24 08:45:51 crc kubenswrapper[4691]: I1124 08:45:51.091297 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 24 08:45:51 crc kubenswrapper[4691]: I1124 08:45:51.091342 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0" gracePeriod=600
Nov 24 08:45:51 crc kubenswrapper[4691]: E1124 08:45:51.217338 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:45:51 crc kubenswrapper[4691]: I1124 08:45:51.720829 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0" exitCode=0
Nov 24 08:45:51 crc kubenswrapper[4691]: I1124 08:45:51.720928 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"}
Nov 24 08:45:51 crc kubenswrapper[4691]: I1124 08:45:51.721340 4691 scope.go:117] "RemoveContainer" containerID="f6d92258bfa625e20cc52564ae622942d4685b3155e92eab89a59d15f2bcf57b"
Nov 24 08:45:51 crc kubenswrapper[4691]: I1124 08:45:51.722088 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:45:51 crc kubenswrapper[4691]: E1124 08:45:51.722481 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:46:06 crc kubenswrapper[4691]: I1124 08:46:06.763058 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:46:06 crc kubenswrapper[4691]: E1124 08:46:06.763909 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:46:19 crc kubenswrapper[4691]: I1124 08:46:19.762535 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:46:19 crc kubenswrapper[4691]: E1124 08:46:19.763936 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:46:30 crc kubenswrapper[4691]: I1124 08:46:30.761074 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:46:30 crc kubenswrapper[4691]: E1124 08:46:30.762151 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:46:42 crc kubenswrapper[4691]: I1124 08:46:42.761753 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:46:42 crc kubenswrapper[4691]: E1124 08:46:42.763786 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:46:56 crc kubenswrapper[4691]: I1124 08:46:56.761646 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:46:56 crc kubenswrapper[4691]: E1124 08:46:56.765135 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:47:07 crc kubenswrapper[4691]: I1124 08:47:07.760851 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:47:07 crc kubenswrapper[4691]: E1124 08:47:07.761608 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:47:19 crc kubenswrapper[4691]: I1124 08:47:19.760412 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:47:19 crc kubenswrapper[4691]: E1124 08:47:19.761609 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:47:31 crc kubenswrapper[4691]: I1124 08:47:31.761626 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:47:31 crc kubenswrapper[4691]: E1124 08:47:31.762438 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:47:44 crc kubenswrapper[4691]: I1124 08:47:44.760744 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:47:44 crc kubenswrapper[4691]: E1124 08:47:44.761679 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:47:57 crc kubenswrapper[4691]: I1124 08:47:57.760567 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:47:57 crc kubenswrapper[4691]: E1124 08:47:57.762627 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:48:09 crc kubenswrapper[4691]: I1124 08:48:09.760799 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:48:09 crc kubenswrapper[4691]: E1124 08:48:09.761497 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:48:23 crc kubenswrapper[4691]: I1124 08:48:23.761626 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:48:23 crc kubenswrapper[4691]: E1124 08:48:23.762582 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:48:35 crc kubenswrapper[4691]: I1124 08:48:35.761972 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:48:35 crc kubenswrapper[4691]: E1124 08:48:35.763491 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:48:47 crc kubenswrapper[4691]: I1124 08:48:47.762694 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:48:47 crc kubenswrapper[4691]: E1124 08:48:47.764132 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:49:02 crc kubenswrapper[4691]: I1124 08:49:02.761200 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:49:02 crc kubenswrapper[4691]: E1124 08:49:02.762177 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:49:14 crc kubenswrapper[4691]: I1124 08:49:14.760908 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:49:14 crc kubenswrapper[4691]: E1124 08:49:14.761694 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:49:25 crc kubenswrapper[4691]: I1124 08:49:25.761529 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:49:25 crc kubenswrapper[4691]: E1124 08:49:25.762573 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:49:38 crc kubenswrapper[4691]: I1124 08:49:38.769285 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:49:38 crc kubenswrapper[4691]: E1124 08:49:38.773521 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:49:50 crc kubenswrapper[4691]: I1124 08:49:50.761316 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:49:50 crc kubenswrapper[4691]: E1124 08:49:50.762251 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:50:03 crc kubenswrapper[4691]: I1124 08:50:03.761126 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:50:03 crc kubenswrapper[4691]: E1124 08:50:03.761978 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:50:14 crc kubenswrapper[4691]: I1124 08:50:14.760715 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:50:14 crc kubenswrapper[4691]: E1124 08:50:14.761409 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.588341 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-84pm9"]
Nov 24 08:50:28 crc kubenswrapper[4691]: E1124 08:50:28.589334 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="181c94c0-135e-4fdf-ab39-fd0326a29b74" containerName="collect-profiles"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.589349 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="181c94c0-135e-4fdf-ab39-fd0326a29b74" containerName="collect-profiles"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.589565 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="181c94c0-135e-4fdf-ab39-fd0326a29b74" containerName="collect-profiles"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.591558 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-84pm9"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.613534 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-84pm9"]
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.713084 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-utilities\") pod \"redhat-operators-84pm9\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " pod="openshift-marketplace/redhat-operators-84pm9"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.714627 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7bdj\" (UniqueName: \"kubernetes.io/projected/dbc4d0b6-d565-49be-837f-cfc2da1c812b-kube-api-access-f7bdj\") pod \"redhat-operators-84pm9\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " pod="openshift-marketplace/redhat-operators-84pm9"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.714817 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-catalog-content\") pod \"redhat-operators-84pm9\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " pod="openshift-marketplace/redhat-operators-84pm9"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.777049 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0"
Nov 24 08:50:28 crc kubenswrapper[4691]: E1124 08:50:28.777334 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.816901 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-catalog-content\") pod \"redhat-operators-84pm9\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " pod="openshift-marketplace/redhat-operators-84pm9"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.817041 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-utilities\") pod \"redhat-operators-84pm9\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " pod="openshift-marketplace/redhat-operators-84pm9"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.817262 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7bdj\" (UniqueName: \"kubernetes.io/projected/dbc4d0b6-d565-49be-837f-cfc2da1c812b-kube-api-access-f7bdj\") pod \"redhat-operators-84pm9\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " pod="openshift-marketplace/redhat-operators-84pm9"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.817404 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-catalog-content\") pod \"redhat-operators-84pm9\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " pod="openshift-marketplace/redhat-operators-84pm9"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.818250 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-utilities\") pod \"redhat-operators-84pm9\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " pod="openshift-marketplace/redhat-operators-84pm9"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.846868 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7bdj\" (UniqueName: \"kubernetes.io/projected/dbc4d0b6-d565-49be-837f-cfc2da1c812b-kube-api-access-f7bdj\") pod \"redhat-operators-84pm9\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " pod="openshift-marketplace/redhat-operators-84pm9"
Nov 24 08:50:28 crc kubenswrapper[4691]: I1124 08:50:28.926894 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-84pm9"
Nov 24 08:50:29 crc kubenswrapper[4691]: I1124 08:50:29.405660 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-84pm9"]
Nov 24 08:50:30 crc kubenswrapper[4691]: I1124 08:50:30.409488 4691 generic.go:334] "Generic (PLEG): container finished" podID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" containerID="7d218d4071add65c50c5bd2d7b73abd73f28eddd5a2346b52f03e45f39b3e501" exitCode=0
Nov 24 08:50:30 crc kubenswrapper[4691]: I1124 08:50:30.409861 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-84pm9" event={"ID":"dbc4d0b6-d565-49be-837f-cfc2da1c812b","Type":"ContainerDied","Data":"7d218d4071add65c50c5bd2d7b73abd73f28eddd5a2346b52f03e45f39b3e501"}
Nov 24 08:50:30 crc kubenswrapper[4691]: I1124 08:50:30.409962 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-84pm9" event={"ID":"dbc4d0b6-d565-49be-837f-cfc2da1c812b","Type":"ContainerStarted","Data":"0f3a0e1bde12450a3a5b971b9d8ee7046cfb50007d6ba7a707ef3700e6abf0c3"}
Nov 24 08:50:30 crc kubenswrapper[4691]: I1124 08:50:30.412927 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 24 08:50:31 crc kubenswrapper[4691]: I1124 08:50:31.422259 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-84pm9" event={"ID":"dbc4d0b6-d565-49be-837f-cfc2da1c812b","Type":"ContainerStarted","Data":"04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928"}
Nov 24 08:50:32 crc kubenswrapper[4691]: I1124 08:50:32.432503 4691 generic.go:334] "Generic (PLEG): container finished" podID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" containerID="04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928" exitCode=0
Nov 24 08:50:32 crc kubenswrapper[4691]: I1124 08:50:32.432556 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-84pm9" event={"ID":"dbc4d0b6-d565-49be-837f-cfc2da1c812b","Type":"ContainerDied","Data":"04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928"}
event={"ID":"dbc4d0b6-d565-49be-837f-cfc2da1c812b","Type":"ContainerDied","Data":"04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928"} Nov 24 08:50:33 crc kubenswrapper[4691]: I1124 08:50:33.443934 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-84pm9" event={"ID":"dbc4d0b6-d565-49be-837f-cfc2da1c812b","Type":"ContainerStarted","Data":"f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222"} Nov 24 08:50:33 crc kubenswrapper[4691]: I1124 08:50:33.463409 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-84pm9" podStartSLOduration=3.040944643 podStartE2EDuration="5.463391188s" podCreationTimestamp="2025-11-24 08:50:28 +0000 UTC" firstStartedPulling="2025-11-24 08:50:30.412561878 +0000 UTC m=+3192.411511147" lastFinishedPulling="2025-11-24 08:50:32.835008443 +0000 UTC m=+3194.833957692" observedRunningTime="2025-11-24 08:50:33.459809356 +0000 UTC m=+3195.458758625" watchObservedRunningTime="2025-11-24 08:50:33.463391188 +0000 UTC m=+3195.462340437" Nov 24 08:50:38 crc kubenswrapper[4691]: I1124 08:50:38.927951 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-84pm9" Nov 24 08:50:38 crc kubenswrapper[4691]: I1124 08:50:38.929569 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-84pm9" Nov 24 08:50:38 crc kubenswrapper[4691]: I1124 08:50:38.977659 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-84pm9" Nov 24 08:50:39 crc kubenswrapper[4691]: I1124 08:50:39.537884 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-84pm9" Nov 24 08:50:39 crc kubenswrapper[4691]: I1124 08:50:39.767302 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-84pm9"] Nov 24 08:50:41 crc kubenswrapper[4691]: I1124 08:50:41.507556 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-84pm9" podUID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" containerName="registry-server" containerID="cri-o://f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222" gracePeriod=2 Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.153479 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-84pm9" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.265590 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-utilities\") pod \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.265658 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-catalog-content\") pod \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.265734 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7bdj\" (UniqueName: \"kubernetes.io/projected/dbc4d0b6-d565-49be-837f-cfc2da1c812b-kube-api-access-f7bdj\") pod \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\" (UID: \"dbc4d0b6-d565-49be-837f-cfc2da1c812b\") " Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.266948 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-utilities" (OuterVolumeSpecName: "utilities") pod "dbc4d0b6-d565-49be-837f-cfc2da1c812b" (UID: "dbc4d0b6-d565-49be-837f-cfc2da1c812b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.273751 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbc4d0b6-d565-49be-837f-cfc2da1c812b-kube-api-access-f7bdj" (OuterVolumeSpecName: "kube-api-access-f7bdj") pod "dbc4d0b6-d565-49be-837f-cfc2da1c812b" (UID: "dbc4d0b6-d565-49be-837f-cfc2da1c812b"). InnerVolumeSpecName "kube-api-access-f7bdj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.368051 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.368085 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7bdj\" (UniqueName: \"kubernetes.io/projected/dbc4d0b6-d565-49be-837f-cfc2da1c812b-kube-api-access-f7bdj\") on node \"crc\" DevicePath \"\"" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.370115 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dbc4d0b6-d565-49be-837f-cfc2da1c812b" (UID: "dbc4d0b6-d565-49be-837f-cfc2da1c812b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.470758 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbc4d0b6-d565-49be-837f-cfc2da1c812b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.519105 4691 generic.go:334] "Generic (PLEG): container finished" podID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" containerID="f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222" exitCode=0 Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.519157 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-84pm9" event={"ID":"dbc4d0b6-d565-49be-837f-cfc2da1c812b","Type":"ContainerDied","Data":"f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222"} Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.519178 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-84pm9" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.519187 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-84pm9" event={"ID":"dbc4d0b6-d565-49be-837f-cfc2da1c812b","Type":"ContainerDied","Data":"0f3a0e1bde12450a3a5b971b9d8ee7046cfb50007d6ba7a707ef3700e6abf0c3"} Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.519208 4691 scope.go:117] "RemoveContainer" containerID="f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.538787 4691 scope.go:117] "RemoveContainer" containerID="04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.548557 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-84pm9"] Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.558576 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-84pm9"] Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.575088 4691 scope.go:117] "RemoveContainer" containerID="7d218d4071add65c50c5bd2d7b73abd73f28eddd5a2346b52f03e45f39b3e501" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.607859 4691 scope.go:117] "RemoveContainer" containerID="f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222" Nov 24 08:50:42 crc kubenswrapper[4691]: E1124 08:50:42.608306 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222\": container with ID starting with f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222 not found: ID does not exist" containerID="f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.608340 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222"} err="failed to get container status \"f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222\": rpc error: code = NotFound desc = could not find container \"f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222\": container with ID starting with f470eb4a080ac62fb2e29d039ee5bd9f4fe117b5962f58828a70d5bc17350222 not found: ID does not exist" Nov 24 08:50:42 crc 
kubenswrapper[4691]: I1124 08:50:42.608362 4691 scope.go:117] "RemoveContainer" containerID="04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928" Nov 24 08:50:42 crc kubenswrapper[4691]: E1124 08:50:42.609689 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928\": container with ID starting with 04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928 not found: ID does not exist" containerID="04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.609745 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928"} err="failed to get container status \"04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928\": rpc error: code = NotFound desc = could not find container \"04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928\": container with ID starting with 04ee3915a1199d70514933001cc9889c6943f4671e3ada1c48e418acd11a4928 not found: ID does not exist" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.609777 4691 scope.go:117] "RemoveContainer" containerID="7d218d4071add65c50c5bd2d7b73abd73f28eddd5a2346b52f03e45f39b3e501" Nov 24 08:50:42 crc kubenswrapper[4691]: E1124 08:50:42.610172 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d218d4071add65c50c5bd2d7b73abd73f28eddd5a2346b52f03e45f39b3e501\": container with ID starting with 7d218d4071add65c50c5bd2d7b73abd73f28eddd5a2346b52f03e45f39b3e501 not found: ID does not exist" containerID="7d218d4071add65c50c5bd2d7b73abd73f28eddd5a2346b52f03e45f39b3e501" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.610200 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d218d4071add65c50c5bd2d7b73abd73f28eddd5a2346b52f03e45f39b3e501"} err="failed to get container status \"7d218d4071add65c50c5bd2d7b73abd73f28eddd5a2346b52f03e45f39b3e501\": rpc error: code = NotFound desc = could not find container \"7d218d4071add65c50c5bd2d7b73abd73f28eddd5a2346b52f03e45f39b3e501\": container with ID starting with 7d218d4071add65c50c5bd2d7b73abd73f28eddd5a2346b52f03e45f39b3e501 not found: ID does not exist" Nov 24 08:50:42 crc kubenswrapper[4691]: I1124 08:50:42.772599 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" path="/var/lib/kubelet/pods/dbc4d0b6-d565-49be-837f-cfc2da1c812b/volumes" Nov 24 08:50:43 crc kubenswrapper[4691]: I1124 08:50:43.761131 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0" Nov 24 08:50:43 crc kubenswrapper[4691]: E1124 08:50:43.761483 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:50:58 crc kubenswrapper[4691]: I1124 08:50:58.770146 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0" 
Nov 24 08:50:59 crc kubenswrapper[4691]: I1124 08:50:59.664024 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"04861cccf7fe3851e474926876ed2c8ee39c213155d7d05264e73515472d3823"} Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.187098 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zck5c"] Nov 24 08:52:03 crc kubenswrapper[4691]: E1124 08:52:03.190170 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" containerName="registry-server" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.190289 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" containerName="registry-server" Nov 24 08:52:03 crc kubenswrapper[4691]: E1124 08:52:03.190388 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" containerName="extract-content" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.190494 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" containerName="extract-content" Nov 24 08:52:03 crc kubenswrapper[4691]: E1124 08:52:03.190590 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" containerName="extract-utilities" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.190664 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" containerName="extract-utilities" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.191013 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbc4d0b6-d565-49be-837f-cfc2da1c812b" containerName="registry-server" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.192898 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.204656 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zck5c"] Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.371298 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-utilities\") pod \"community-operators-zck5c\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.371634 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6d4kk\" (UniqueName: \"kubernetes.io/projected/8367f71a-0068-4606-8d24-e7ddbb11c98d-kube-api-access-6d4kk\") pod \"community-operators-zck5c\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.371783 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-catalog-content\") pod \"community-operators-zck5c\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.474155 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-utilities\") pod \"community-operators-zck5c\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.474330 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6d4kk\" (UniqueName: \"kubernetes.io/projected/8367f71a-0068-4606-8d24-e7ddbb11c98d-kube-api-access-6d4kk\") pod \"community-operators-zck5c\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.474392 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-catalog-content\") pod \"community-operators-zck5c\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.474663 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-utilities\") pod \"community-operators-zck5c\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.474870 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-catalog-content\") pod \"community-operators-zck5c\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.495839 4691 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6d4kk\" (UniqueName: \"kubernetes.io/projected/8367f71a-0068-4606-8d24-e7ddbb11c98d-kube-api-access-6d4kk\") pod \"community-operators-zck5c\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:03 crc kubenswrapper[4691]: I1124 08:52:03.542648 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:04 crc kubenswrapper[4691]: I1124 08:52:04.119140 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zck5c"] Nov 24 08:52:04 crc kubenswrapper[4691]: I1124 08:52:04.240997 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zck5c" event={"ID":"8367f71a-0068-4606-8d24-e7ddbb11c98d","Type":"ContainerStarted","Data":"6d2eee8207f03073d4897739586a89c8adc04e4c9203091e5421f40f9b021283"} Nov 24 08:52:05 crc kubenswrapper[4691]: I1124 08:52:05.253205 4691 generic.go:334] "Generic (PLEG): container finished" podID="8367f71a-0068-4606-8d24-e7ddbb11c98d" containerID="de86c62cae49e349680491ebcbca60e790876b4ad008bb9f7d3d5014a4c31df6" exitCode=0 Nov 24 08:52:05 crc kubenswrapper[4691]: I1124 08:52:05.253393 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zck5c" event={"ID":"8367f71a-0068-4606-8d24-e7ddbb11c98d","Type":"ContainerDied","Data":"de86c62cae49e349680491ebcbca60e790876b4ad008bb9f7d3d5014a4c31df6"} Nov 24 08:52:08 crc kubenswrapper[4691]: I1124 08:52:08.282441 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zck5c" event={"ID":"8367f71a-0068-4606-8d24-e7ddbb11c98d","Type":"ContainerStarted","Data":"b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7"} Nov 24 08:52:09 crc kubenswrapper[4691]: I1124 08:52:09.294323 4691 generic.go:334] "Generic (PLEG): container finished" podID="8367f71a-0068-4606-8d24-e7ddbb11c98d" containerID="b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7" exitCode=0 Nov 24 08:52:09 crc kubenswrapper[4691]: I1124 08:52:09.294378 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zck5c" event={"ID":"8367f71a-0068-4606-8d24-e7ddbb11c98d","Type":"ContainerDied","Data":"b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7"} Nov 24 08:52:10 crc kubenswrapper[4691]: I1124 08:52:10.307178 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zck5c" event={"ID":"8367f71a-0068-4606-8d24-e7ddbb11c98d","Type":"ContainerStarted","Data":"d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1"} Nov 24 08:52:10 crc kubenswrapper[4691]: I1124 08:52:10.335273 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zck5c" podStartSLOduration=2.595771549 podStartE2EDuration="7.33525133s" podCreationTimestamp="2025-11-24 08:52:03 +0000 UTC" firstStartedPulling="2025-11-24 08:52:05.255896174 +0000 UTC m=+3287.254845433" lastFinishedPulling="2025-11-24 08:52:09.995375965 +0000 UTC m=+3291.994325214" observedRunningTime="2025-11-24 08:52:10.325764462 +0000 UTC m=+3292.324713721" watchObservedRunningTime="2025-11-24 08:52:10.33525133 +0000 UTC m=+3292.334200579" Nov 24 08:52:13 crc kubenswrapper[4691]: I1124 08:52:13.543141 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:13 crc kubenswrapper[4691]: I1124 08:52:13.543517 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:13 crc kubenswrapper[4691]: I1124 08:52:13.607380 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:23 crc kubenswrapper[4691]: I1124 08:52:23.594895 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:23 crc kubenswrapper[4691]: I1124 08:52:23.643741 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zck5c"] Nov 24 08:52:24 crc kubenswrapper[4691]: I1124 08:52:24.429915 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zck5c" podUID="8367f71a-0068-4606-8d24-e7ddbb11c98d" containerName="registry-server" containerID="cri-o://d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1" gracePeriod=2 Nov 24 08:52:24 crc kubenswrapper[4691]: E1124 08:52:24.547924 4691 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8367f71a_0068_4606_8d24_e7ddbb11c98d.slice/crio-conmon-d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1.scope\": RecentStats: unable to find data in memory cache]" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.015784 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.094156 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-catalog-content\") pod \"8367f71a-0068-4606-8d24-e7ddbb11c98d\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.094654 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-utilities\") pod \"8367f71a-0068-4606-8d24-e7ddbb11c98d\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.094829 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6d4kk\" (UniqueName: \"kubernetes.io/projected/8367f71a-0068-4606-8d24-e7ddbb11c98d-kube-api-access-6d4kk\") pod \"8367f71a-0068-4606-8d24-e7ddbb11c98d\" (UID: \"8367f71a-0068-4606-8d24-e7ddbb11c98d\") " Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.095775 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-utilities" (OuterVolumeSpecName: "utilities") pod "8367f71a-0068-4606-8d24-e7ddbb11c98d" (UID: "8367f71a-0068-4606-8d24-e7ddbb11c98d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.096132 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.102737 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8367f71a-0068-4606-8d24-e7ddbb11c98d-kube-api-access-6d4kk" (OuterVolumeSpecName: "kube-api-access-6d4kk") pod "8367f71a-0068-4606-8d24-e7ddbb11c98d" (UID: "8367f71a-0068-4606-8d24-e7ddbb11c98d"). InnerVolumeSpecName "kube-api-access-6d4kk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.150764 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8367f71a-0068-4606-8d24-e7ddbb11c98d" (UID: "8367f71a-0068-4606-8d24-e7ddbb11c98d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.197729 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6d4kk\" (UniqueName: \"kubernetes.io/projected/8367f71a-0068-4606-8d24-e7ddbb11c98d-kube-api-access-6d4kk\") on node \"crc\" DevicePath \"\"" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.197775 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8367f71a-0068-4606-8d24-e7ddbb11c98d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.443002 4691 generic.go:334] "Generic (PLEG): container finished" podID="8367f71a-0068-4606-8d24-e7ddbb11c98d" containerID="d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1" exitCode=0 Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.443095 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zck5c" event={"ID":"8367f71a-0068-4606-8d24-e7ddbb11c98d","Type":"ContainerDied","Data":"d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1"} Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.443400 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zck5c" event={"ID":"8367f71a-0068-4606-8d24-e7ddbb11c98d","Type":"ContainerDied","Data":"6d2eee8207f03073d4897739586a89c8adc04e4c9203091e5421f40f9b021283"} Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.443425 4691 scope.go:117] "RemoveContainer" containerID="d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.443194 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zck5c" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.486701 4691 scope.go:117] "RemoveContainer" containerID="b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.488031 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zck5c"] Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.496421 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zck5c"] Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.505496 4691 scope.go:117] "RemoveContainer" containerID="de86c62cae49e349680491ebcbca60e790876b4ad008bb9f7d3d5014a4c31df6" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.564160 4691 scope.go:117] "RemoveContainer" containerID="d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1" Nov 24 08:52:25 crc kubenswrapper[4691]: E1124 08:52:25.564803 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1\": container with ID starting with d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1 not found: ID does not exist" containerID="d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.564838 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1"} err="failed to get container status \"d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1\": rpc error: code = NotFound desc = could not find container \"d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1\": container with ID starting with d1e09ca61504fc492f91b77006896f97af01008b85959184c72af1902540d1a1 not found: ID does not exist" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.564860 4691 scope.go:117] "RemoveContainer" containerID="b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7" Nov 24 08:52:25 crc kubenswrapper[4691]: E1124 08:52:25.565219 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7\": container with ID starting with b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7 not found: ID does not exist" containerID="b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.565242 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7"} err="failed to get container status \"b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7\": rpc error: code = NotFound desc = could not find container \"b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7\": container with ID starting with b2fa52a36b99ac030c28ae46aec98abb32c25fef205dbf03cad4e4f363923fd7 not found: ID does not exist" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.565255 4691 scope.go:117] "RemoveContainer" containerID="de86c62cae49e349680491ebcbca60e790876b4ad008bb9f7d3d5014a4c31df6" Nov 24 08:52:25 crc kubenswrapper[4691]: E1124 08:52:25.565883 4691 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"de86c62cae49e349680491ebcbca60e790876b4ad008bb9f7d3d5014a4c31df6\": container with ID starting with de86c62cae49e349680491ebcbca60e790876b4ad008bb9f7d3d5014a4c31df6 not found: ID does not exist" containerID="de86c62cae49e349680491ebcbca60e790876b4ad008bb9f7d3d5014a4c31df6" Nov 24 08:52:25 crc kubenswrapper[4691]: I1124 08:52:25.565962 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de86c62cae49e349680491ebcbca60e790876b4ad008bb9f7d3d5014a4c31df6"} err="failed to get container status \"de86c62cae49e349680491ebcbca60e790876b4ad008bb9f7d3d5014a4c31df6\": rpc error: code = NotFound desc = could not find container \"de86c62cae49e349680491ebcbca60e790876b4ad008bb9f7d3d5014a4c31df6\": container with ID starting with de86c62cae49e349680491ebcbca60e790876b4ad008bb9f7d3d5014a4c31df6 not found: ID does not exist" Nov 24 08:52:26 crc kubenswrapper[4691]: I1124 08:52:26.772151 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8367f71a-0068-4606-8d24-e7ddbb11c98d" path="/var/lib/kubelet/pods/8367f71a-0068-4606-8d24-e7ddbb11c98d/volumes" Nov 24 08:53:21 crc kubenswrapper[4691]: I1124 08:53:21.089001 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:53:21 crc kubenswrapper[4691]: I1124 08:53:21.089654 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.176388 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qt7bg"] Nov 24 08:53:29 crc kubenswrapper[4691]: E1124 08:53:29.177374 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8367f71a-0068-4606-8d24-e7ddbb11c98d" containerName="registry-server" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.177387 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8367f71a-0068-4606-8d24-e7ddbb11c98d" containerName="registry-server" Nov 24 08:53:29 crc kubenswrapper[4691]: E1124 08:53:29.177400 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8367f71a-0068-4606-8d24-e7ddbb11c98d" containerName="extract-utilities" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.177406 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8367f71a-0068-4606-8d24-e7ddbb11c98d" containerName="extract-utilities" Nov 24 08:53:29 crc kubenswrapper[4691]: E1124 08:53:29.177490 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8367f71a-0068-4606-8d24-e7ddbb11c98d" containerName="extract-content" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.177500 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8367f71a-0068-4606-8d24-e7ddbb11c98d" containerName="extract-content" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.177672 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="8367f71a-0068-4606-8d24-e7ddbb11c98d" containerName="registry-server" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 
08:53:29.179256 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.187361 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qt7bg"] Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.282435 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcjjr\" (UniqueName: \"kubernetes.io/projected/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-kube-api-access-tcjjr\") pod \"certified-operators-qt7bg\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.282853 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-utilities\") pod \"certified-operators-qt7bg\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.282872 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-catalog-content\") pod \"certified-operators-qt7bg\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.385101 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcjjr\" (UniqueName: \"kubernetes.io/projected/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-kube-api-access-tcjjr\") pod \"certified-operators-qt7bg\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.385273 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-utilities\") pod \"certified-operators-qt7bg\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.385297 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-catalog-content\") pod \"certified-operators-qt7bg\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.385958 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-utilities\") pod \"certified-operators-qt7bg\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.385979 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-catalog-content\") pod \"certified-operators-qt7bg\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:29 crc 
kubenswrapper[4691]: I1124 08:53:29.406762 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcjjr\" (UniqueName: \"kubernetes.io/projected/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-kube-api-access-tcjjr\") pod \"certified-operators-qt7bg\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:29 crc kubenswrapper[4691]: I1124 08:53:29.506043 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:30 crc kubenswrapper[4691]: I1124 08:53:30.118937 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qt7bg"] Nov 24 08:53:31 crc kubenswrapper[4691]: I1124 08:53:31.129026 4691 generic.go:334] "Generic (PLEG): container finished" podID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" containerID="ba027401e75b80d6da3265151a3f1bda1708fadc0666362eac336588f71b4544" exitCode=0 Nov 24 08:53:31 crc kubenswrapper[4691]: I1124 08:53:31.129119 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt7bg" event={"ID":"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd","Type":"ContainerDied","Data":"ba027401e75b80d6da3265151a3f1bda1708fadc0666362eac336588f71b4544"} Nov 24 08:53:31 crc kubenswrapper[4691]: I1124 08:53:31.129430 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt7bg" event={"ID":"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd","Type":"ContainerStarted","Data":"b274b3ad4e0c3cc332f0c11a7ee479a94a7d65bfa2a7f7d5305313cfb06501b3"} Nov 24 08:53:32 crc kubenswrapper[4691]: I1124 08:53:32.143233 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt7bg" event={"ID":"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd","Type":"ContainerStarted","Data":"ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664"} Nov 24 08:53:33 crc kubenswrapper[4691]: I1124 08:53:33.154566 4691 generic.go:334] "Generic (PLEG): container finished" podID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" containerID="ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664" exitCode=0 Nov 24 08:53:33 crc kubenswrapper[4691]: I1124 08:53:33.154667 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt7bg" event={"ID":"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd","Type":"ContainerDied","Data":"ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664"} Nov 24 08:53:34 crc kubenswrapper[4691]: I1124 08:53:34.167155 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt7bg" event={"ID":"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd","Type":"ContainerStarted","Data":"56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a"} Nov 24 08:53:34 crc kubenswrapper[4691]: I1124 08:53:34.191417 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qt7bg" podStartSLOduration=2.73306979 podStartE2EDuration="5.19140042s" podCreationTimestamp="2025-11-24 08:53:29 +0000 UTC" firstStartedPulling="2025-11-24 08:53:31.13312827 +0000 UTC m=+3373.132077519" lastFinishedPulling="2025-11-24 08:53:33.5914589 +0000 UTC m=+3375.590408149" observedRunningTime="2025-11-24 08:53:34.183855627 +0000 UTC m=+3376.182804886" watchObservedRunningTime="2025-11-24 08:53:34.19140042 +0000 UTC m=+3376.190349669" Nov 24 08:53:39 crc kubenswrapper[4691]: I1124 
08:53:39.506219 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:39 crc kubenswrapper[4691]: I1124 08:53:39.507833 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:39 crc kubenswrapper[4691]: I1124 08:53:39.552321 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:40 crc kubenswrapper[4691]: I1124 08:53:40.261526 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:40 crc kubenswrapper[4691]: I1124 08:53:40.314859 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qt7bg"] Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.203281 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wp6dx"] Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.205728 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.236724 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qt7bg" podUID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" containerName="registry-server" containerID="cri-o://56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a" gracePeriod=2 Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.238667 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wp6dx"] Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.256063 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-catalog-content\") pod \"redhat-marketplace-wp6dx\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.256231 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-utilities\") pod \"redhat-marketplace-wp6dx\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.256272 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2nd2\" (UniqueName: \"kubernetes.io/projected/37777d49-e2f2-45f6-8ca5-6538db6d391b-kube-api-access-d2nd2\") pod \"redhat-marketplace-wp6dx\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.359164 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-catalog-content\") pod \"redhat-marketplace-wp6dx\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.359362 4691 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-utilities\") pod \"redhat-marketplace-wp6dx\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.359404 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2nd2\" (UniqueName: \"kubernetes.io/projected/37777d49-e2f2-45f6-8ca5-6538db6d391b-kube-api-access-d2nd2\") pod \"redhat-marketplace-wp6dx\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.360000 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-catalog-content\") pod \"redhat-marketplace-wp6dx\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.360016 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-utilities\") pod \"redhat-marketplace-wp6dx\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.379537 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2nd2\" (UniqueName: \"kubernetes.io/projected/37777d49-e2f2-45f6-8ca5-6538db6d391b-kube-api-access-d2nd2\") pod \"redhat-marketplace-wp6dx\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.539015 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.889908 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.968656 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tcjjr\" (UniqueName: \"kubernetes.io/projected/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-kube-api-access-tcjjr\") pod \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.968746 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-utilities\") pod \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.968855 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-catalog-content\") pod \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\" (UID: \"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd\") " Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.969776 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-utilities" (OuterVolumeSpecName: "utilities") pod "b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" (UID: "b09bd242-cd1e-4480-ba84-b1b3b6b29cbd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:53:42 crc kubenswrapper[4691]: I1124 08:53:42.973783 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-kube-api-access-tcjjr" (OuterVolumeSpecName: "kube-api-access-tcjjr") pod "b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" (UID: "b09bd242-cd1e-4480-ba84-b1b3b6b29cbd"). InnerVolumeSpecName "kube-api-access-tcjjr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.071714 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tcjjr\" (UniqueName: \"kubernetes.io/projected/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-kube-api-access-tcjjr\") on node \"crc\" DevicePath \"\"" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.071757 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.103734 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wp6dx"] Nov 24 08:53:43 crc kubenswrapper[4691]: W1124 08:53:43.106378 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37777d49_e2f2_45f6_8ca5_6538db6d391b.slice/crio-97c62f479b3411f0286e0e44bdfc2f37e56ac105843feffec6448cfc42e9bda9 WatchSource:0}: Error finding container 97c62f479b3411f0286e0e44bdfc2f37e56ac105843feffec6448cfc42e9bda9: Status 404 returned error can't find the container with id 97c62f479b3411f0286e0e44bdfc2f37e56ac105843feffec6448cfc42e9bda9 Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.246830 4691 generic.go:334] "Generic (PLEG): container finished" podID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" containerID="56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a" exitCode=0 Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.246901 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qt7bg" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.246912 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt7bg" event={"ID":"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd","Type":"ContainerDied","Data":"56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a"} Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.246967 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qt7bg" event={"ID":"b09bd242-cd1e-4480-ba84-b1b3b6b29cbd","Type":"ContainerDied","Data":"b274b3ad4e0c3cc332f0c11a7ee479a94a7d65bfa2a7f7d5305313cfb06501b3"} Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.247017 4691 scope.go:117] "RemoveContainer" containerID="56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.248387 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wp6dx" event={"ID":"37777d49-e2f2-45f6-8ca5-6538db6d391b","Type":"ContainerStarted","Data":"97c62f479b3411f0286e0e44bdfc2f37e56ac105843feffec6448cfc42e9bda9"} Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.267264 4691 scope.go:117] "RemoveContainer" containerID="ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.288888 4691 scope.go:117] "RemoveContainer" containerID="ba027401e75b80d6da3265151a3f1bda1708fadc0666362eac336588f71b4544" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.311284 4691 scope.go:117] "RemoveContainer" containerID="56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a" Nov 24 08:53:43 crc kubenswrapper[4691]: E1124 08:53:43.311929 4691 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a\": container with ID starting with 56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a not found: ID does not exist" containerID="56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.312016 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a"} err="failed to get container status \"56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a\": rpc error: code = NotFound desc = could not find container \"56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a\": container with ID starting with 56c3aeae0ee758292fdeea752978c2b802c3d8c39692c5675fde9e959ee81e9a not found: ID does not exist" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.312062 4691 scope.go:117] "RemoveContainer" containerID="ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664" Nov 24 08:53:43 crc kubenswrapper[4691]: E1124 08:53:43.312399 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664\": container with ID starting with ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664 not found: ID does not exist" containerID="ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.312439 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664"} err="failed to get container status \"ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664\": rpc error: code = NotFound desc = could not find container \"ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664\": container with ID starting with ca37c182c46b95283ac2090682d751b98ca0e2c1bb7bcaf39a789f620eff9664 not found: ID does not exist" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.312591 4691 scope.go:117] "RemoveContainer" containerID="ba027401e75b80d6da3265151a3f1bda1708fadc0666362eac336588f71b4544" Nov 24 08:53:43 crc kubenswrapper[4691]: E1124 08:53:43.312907 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba027401e75b80d6da3265151a3f1bda1708fadc0666362eac336588f71b4544\": container with ID starting with ba027401e75b80d6da3265151a3f1bda1708fadc0666362eac336588f71b4544 not found: ID does not exist" containerID="ba027401e75b80d6da3265151a3f1bda1708fadc0666362eac336588f71b4544" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.312946 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba027401e75b80d6da3265151a3f1bda1708fadc0666362eac336588f71b4544"} err="failed to get container status \"ba027401e75b80d6da3265151a3f1bda1708fadc0666362eac336588f71b4544\": rpc error: code = NotFound desc = could not find container \"ba027401e75b80d6da3265151a3f1bda1708fadc0666362eac336588f71b4544\": container with ID starting with ba027401e75b80d6da3265151a3f1bda1708fadc0666362eac336588f71b4544 not found: ID does not exist" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.505020 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" (UID: "b09bd242-cd1e-4480-ba84-b1b3b6b29cbd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.581570 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.609641 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qt7bg"] Nov 24 08:53:43 crc kubenswrapper[4691]: I1124 08:53:43.618571 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qt7bg"] Nov 24 08:53:44 crc kubenswrapper[4691]: I1124 08:53:44.260474 4691 generic.go:334] "Generic (PLEG): container finished" podID="37777d49-e2f2-45f6-8ca5-6538db6d391b" containerID="b43f17fe1c2741de46fb7ee9e927de00f56dfe317eaca8ccb3d958f86d04ca92" exitCode=0 Nov 24 08:53:44 crc kubenswrapper[4691]: I1124 08:53:44.260579 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wp6dx" event={"ID":"37777d49-e2f2-45f6-8ca5-6538db6d391b","Type":"ContainerDied","Data":"b43f17fe1c2741de46fb7ee9e927de00f56dfe317eaca8ccb3d958f86d04ca92"} Nov 24 08:53:44 crc kubenswrapper[4691]: I1124 08:53:44.772397 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" path="/var/lib/kubelet/pods/b09bd242-cd1e-4480-ba84-b1b3b6b29cbd/volumes" Nov 24 08:53:46 crc kubenswrapper[4691]: I1124 08:53:46.284855 4691 generic.go:334] "Generic (PLEG): container finished" podID="37777d49-e2f2-45f6-8ca5-6538db6d391b" containerID="f894ee2fc0f1ff2443b16fa9340aa6b93e2cdb10eb79f136406242765f8aa702" exitCode=0 Nov 24 08:53:46 crc kubenswrapper[4691]: I1124 08:53:46.284944 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wp6dx" event={"ID":"37777d49-e2f2-45f6-8ca5-6538db6d391b","Type":"ContainerDied","Data":"f894ee2fc0f1ff2443b16fa9340aa6b93e2cdb10eb79f136406242765f8aa702"} Nov 24 08:53:47 crc kubenswrapper[4691]: I1124 08:53:47.301466 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wp6dx" event={"ID":"37777d49-e2f2-45f6-8ca5-6538db6d391b","Type":"ContainerStarted","Data":"6ed5ec908e95f711549c6b09eedd7d9d9c2c1be131aa6dc077fcec502198e943"} Nov 24 08:53:47 crc kubenswrapper[4691]: I1124 08:53:47.339355 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wp6dx" podStartSLOduration=2.844884036 podStartE2EDuration="5.339319198s" podCreationTimestamp="2025-11-24 08:53:42 +0000 UTC" firstStartedPulling="2025-11-24 08:53:44.263433449 +0000 UTC m=+3386.262382688" lastFinishedPulling="2025-11-24 08:53:46.757868591 +0000 UTC m=+3388.756817850" observedRunningTime="2025-11-24 08:53:47.333109292 +0000 UTC m=+3389.332058571" watchObservedRunningTime="2025-11-24 08:53:47.339319198 +0000 UTC m=+3389.338268447" Nov 24 08:53:51 crc kubenswrapper[4691]: I1124 08:53:51.089201 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:53:51 crc kubenswrapper[4691]: I1124 08:53:51.089807 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:53:52 crc kubenswrapper[4691]: I1124 08:53:52.539627 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:52 crc kubenswrapper[4691]: I1124 08:53:52.539676 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:52 crc kubenswrapper[4691]: I1124 08:53:52.601016 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:53 crc kubenswrapper[4691]: I1124 08:53:53.401383 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:53 crc kubenswrapper[4691]: I1124 08:53:53.446643 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wp6dx"] Nov 24 08:53:55 crc kubenswrapper[4691]: I1124 08:53:55.373165 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wp6dx" podUID="37777d49-e2f2-45f6-8ca5-6538db6d391b" containerName="registry-server" containerID="cri-o://6ed5ec908e95f711549c6b09eedd7d9d9c2c1be131aa6dc077fcec502198e943" gracePeriod=2 Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.389967 4691 generic.go:334] "Generic (PLEG): container finished" podID="37777d49-e2f2-45f6-8ca5-6538db6d391b" containerID="6ed5ec908e95f711549c6b09eedd7d9d9c2c1be131aa6dc077fcec502198e943" exitCode=0 Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.390497 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wp6dx" event={"ID":"37777d49-e2f2-45f6-8ca5-6538db6d391b","Type":"ContainerDied","Data":"6ed5ec908e95f711549c6b09eedd7d9d9c2c1be131aa6dc077fcec502198e943"} Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.390524 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wp6dx" event={"ID":"37777d49-e2f2-45f6-8ca5-6538db6d391b","Type":"ContainerDied","Data":"97c62f479b3411f0286e0e44bdfc2f37e56ac105843feffec6448cfc42e9bda9"} Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.390535 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97c62f479b3411f0286e0e44bdfc2f37e56ac105843feffec6448cfc42e9bda9" Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.430392 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.542138 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-catalog-content\") pod \"37777d49-e2f2-45f6-8ca5-6538db6d391b\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.542258 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2nd2\" (UniqueName: \"kubernetes.io/projected/37777d49-e2f2-45f6-8ca5-6538db6d391b-kube-api-access-d2nd2\") pod \"37777d49-e2f2-45f6-8ca5-6538db6d391b\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.542484 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-utilities\") pod \"37777d49-e2f2-45f6-8ca5-6538db6d391b\" (UID: \"37777d49-e2f2-45f6-8ca5-6538db6d391b\") " Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.543597 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-utilities" (OuterVolumeSpecName: "utilities") pod "37777d49-e2f2-45f6-8ca5-6538db6d391b" (UID: "37777d49-e2f2-45f6-8ca5-6538db6d391b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.548816 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37777d49-e2f2-45f6-8ca5-6538db6d391b-kube-api-access-d2nd2" (OuterVolumeSpecName: "kube-api-access-d2nd2") pod "37777d49-e2f2-45f6-8ca5-6538db6d391b" (UID: "37777d49-e2f2-45f6-8ca5-6538db6d391b"). InnerVolumeSpecName "kube-api-access-d2nd2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.559748 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "37777d49-e2f2-45f6-8ca5-6538db6d391b" (UID: "37777d49-e2f2-45f6-8ca5-6538db6d391b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.644710 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.644750 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37777d49-e2f2-45f6-8ca5-6538db6d391b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 08:53:56 crc kubenswrapper[4691]: I1124 08:53:56.644763 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2nd2\" (UniqueName: \"kubernetes.io/projected/37777d49-e2f2-45f6-8ca5-6538db6d391b-kube-api-access-d2nd2\") on node \"crc\" DevicePath \"\"" Nov 24 08:53:56 crc kubenswrapper[4691]: E1124 08:53:56.916705 4691 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37777d49_e2f2_45f6_8ca5_6538db6d391b.slice\": RecentStats: unable to find data in memory cache]" Nov 24 08:53:57 crc kubenswrapper[4691]: I1124 08:53:57.397993 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wp6dx" Nov 24 08:53:57 crc kubenswrapper[4691]: I1124 08:53:57.423503 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wp6dx"] Nov 24 08:53:57 crc kubenswrapper[4691]: I1124 08:53:57.434558 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wp6dx"] Nov 24 08:53:58 crc kubenswrapper[4691]: I1124 08:53:58.776023 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37777d49-e2f2-45f6-8ca5-6538db6d391b" path="/var/lib/kubelet/pods/37777d49-e2f2-45f6-8ca5-6538db6d391b/volumes" Nov 24 08:54:21 crc kubenswrapper[4691]: I1124 08:54:21.089859 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:54:21 crc kubenswrapper[4691]: I1124 08:54:21.090744 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:54:21 crc kubenswrapper[4691]: I1124 08:54:21.090799 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:54:21 crc kubenswrapper[4691]: I1124 08:54:21.091641 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"04861cccf7fe3851e474926876ed2c8ee39c213155d7d05264e73515472d3823"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 08:54:21 crc kubenswrapper[4691]: I1124 08:54:21.091711 4691 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://04861cccf7fe3851e474926876ed2c8ee39c213155d7d05264e73515472d3823" gracePeriod=600 Nov 24 08:54:21 crc kubenswrapper[4691]: I1124 08:54:21.644656 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="04861cccf7fe3851e474926876ed2c8ee39c213155d7d05264e73515472d3823" exitCode=0 Nov 24 08:54:21 crc kubenswrapper[4691]: I1124 08:54:21.644687 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"04861cccf7fe3851e474926876ed2c8ee39c213155d7d05264e73515472d3823"} Nov 24 08:54:21 crc kubenswrapper[4691]: I1124 08:54:21.645325 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f"} Nov 24 08:54:21 crc kubenswrapper[4691]: I1124 08:54:21.645359 4691 scope.go:117] "RemoveContainer" containerID="df430d301d5c839b2ad1ca52000af690a54babae6365e16b81264ecabac616c0" Nov 24 08:56:21 crc kubenswrapper[4691]: I1124 08:56:21.089406 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:56:21 crc kubenswrapper[4691]: I1124 08:56:21.091566 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:56:51 crc kubenswrapper[4691]: I1124 08:56:51.089344 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:56:51 crc kubenswrapper[4691]: I1124 08:56:51.090000 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:57:02 crc kubenswrapper[4691]: I1124 08:57:02.543345 4691 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-tqr2v container/package-server-manager namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"http://10.217.0.19:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 24 08:57:02 crc kubenswrapper[4691]: I1124 08:57:02.544097 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tqr2v" podUID="5ccb619a-2f5c-4b42-9dbc-00479b290b3a" containerName="package-server-manager" 
probeResult="failure" output="Get \"http://10.217.0.19:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 24 08:57:21 crc kubenswrapper[4691]: I1124 08:57:21.089333 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 08:57:21 crc kubenswrapper[4691]: I1124 08:57:21.090131 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 08:57:21 crc kubenswrapper[4691]: I1124 08:57:21.090170 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 08:57:21 crc kubenswrapper[4691]: I1124 08:57:21.091668 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 08:57:21 crc kubenswrapper[4691]: I1124 08:57:21.091738 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" gracePeriod=600 Nov 24 08:57:21 crc kubenswrapper[4691]: E1124 08:57:21.787328 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:57:21 crc kubenswrapper[4691]: I1124 08:57:21.854765 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" exitCode=0 Nov 24 08:57:21 crc kubenswrapper[4691]: I1124 08:57:21.854859 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f"} Nov 24 08:57:21 crc kubenswrapper[4691]: I1124 08:57:21.855376 4691 scope.go:117] "RemoveContainer" containerID="04861cccf7fe3851e474926876ed2c8ee39c213155d7d05264e73515472d3823" Nov 24 08:57:21 crc kubenswrapper[4691]: I1124 08:57:21.856088 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 08:57:21 crc kubenswrapper[4691]: E1124 08:57:21.856378 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:57:32 crc kubenswrapper[4691]: I1124 08:57:32.762128 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 08:57:32 crc kubenswrapper[4691]: E1124 08:57:32.763410 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:57:47 crc kubenswrapper[4691]: I1124 08:57:47.760780 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 08:57:47 crc kubenswrapper[4691]: E1124 08:57:47.761699 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:58:00 crc kubenswrapper[4691]: I1124 08:58:00.760272 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 08:58:00 crc kubenswrapper[4691]: E1124 08:58:00.761180 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:58:12 crc kubenswrapper[4691]: I1124 08:58:12.760690 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 08:58:12 crc kubenswrapper[4691]: E1124 08:58:12.761982 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:58:23 crc kubenswrapper[4691]: I1124 08:58:23.760810 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 08:58:23 crc kubenswrapper[4691]: E1124 08:58:23.761669 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 24 08:58:23 crc kubenswrapper[4691]: E1124 08:58:23.761669 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:58:35 crc kubenswrapper[4691]: I1124 08:58:35.761016 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f"
Nov 24 08:58:35 crc kubenswrapper[4691]: E1124 08:58:35.762023 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:58:47 crc kubenswrapper[4691]: I1124 08:58:47.761979 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f"
Nov 24 08:58:47 crc kubenswrapper[4691]: E1124 08:58:47.763970 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:59:02 crc kubenswrapper[4691]: I1124 08:59:02.764811 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f"
Nov 24 08:59:02 crc kubenswrapper[4691]: E1124 08:59:02.765648 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:59:16 crc kubenswrapper[4691]: I1124 08:59:16.761268 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f"
Nov 24 08:59:16 crc kubenswrapper[4691]: E1124 08:59:16.762455 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 08:59:29 crc kubenswrapper[4691]: I1124 08:59:29.761561 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f"
podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:59:44 crc kubenswrapper[4691]: I1124 08:59:44.761504 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 08:59:44 crc kubenswrapper[4691]: E1124 08:59:44.762555 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 08:59:59 crc kubenswrapper[4691]: I1124 08:59:59.760972 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 08:59:59 crc kubenswrapper[4691]: E1124 08:59:59.761910 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.185501 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2"] Nov 24 09:00:00 crc kubenswrapper[4691]: E1124 09:00:00.186078 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" containerName="extract-utilities" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.186109 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" containerName="extract-utilities" Nov 24 09:00:00 crc kubenswrapper[4691]: E1124 09:00:00.186142 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37777d49-e2f2-45f6-8ca5-6538db6d391b" containerName="extract-utilities" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.186156 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="37777d49-e2f2-45f6-8ca5-6538db6d391b" containerName="extract-utilities" Nov 24 09:00:00 crc kubenswrapper[4691]: E1124 09:00:00.186170 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37777d49-e2f2-45f6-8ca5-6538db6d391b" containerName="extract-content" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.186179 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="37777d49-e2f2-45f6-8ca5-6538db6d391b" containerName="extract-content" Nov 24 09:00:00 crc kubenswrapper[4691]: E1124 09:00:00.186198 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" containerName="registry-server" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.186206 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" containerName="registry-server" Nov 24 09:00:00 crc kubenswrapper[4691]: E1124 09:00:00.186220 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" containerName="extract-content" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.186227 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" 
containerName="extract-content" Nov 24 09:00:00 crc kubenswrapper[4691]: E1124 09:00:00.186256 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37777d49-e2f2-45f6-8ca5-6538db6d391b" containerName="registry-server" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.186264 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="37777d49-e2f2-45f6-8ca5-6538db6d391b" containerName="registry-server" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.186530 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="b09bd242-cd1e-4480-ba84-b1b3b6b29cbd" containerName="registry-server" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.186560 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="37777d49-e2f2-45f6-8ca5-6538db6d391b" containerName="registry-server" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.187393 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.195850 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.195871 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.205503 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2"] Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.250032 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lklz6\" (UniqueName: \"kubernetes.io/projected/57261fdb-23d0-4aa2-bec2-a427e31a8776-kube-api-access-lklz6\") pod \"collect-profiles-29399580-wbvw2\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.250103 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57261fdb-23d0-4aa2-bec2-a427e31a8776-config-volume\") pod \"collect-profiles-29399580-wbvw2\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.250252 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57261fdb-23d0-4aa2-bec2-a427e31a8776-secret-volume\") pod \"collect-profiles-29399580-wbvw2\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.352144 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57261fdb-23d0-4aa2-bec2-a427e31a8776-secret-volume\") pod \"collect-profiles-29399580-wbvw2\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.352645 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-lklz6\" (UniqueName: \"kubernetes.io/projected/57261fdb-23d0-4aa2-bec2-a427e31a8776-kube-api-access-lklz6\") pod \"collect-profiles-29399580-wbvw2\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.352695 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57261fdb-23d0-4aa2-bec2-a427e31a8776-config-volume\") pod \"collect-profiles-29399580-wbvw2\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.353872 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57261fdb-23d0-4aa2-bec2-a427e31a8776-config-volume\") pod \"collect-profiles-29399580-wbvw2\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.372770 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57261fdb-23d0-4aa2-bec2-a427e31a8776-secret-volume\") pod \"collect-profiles-29399580-wbvw2\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.373545 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lklz6\" (UniqueName: \"kubernetes.io/projected/57261fdb-23d0-4aa2-bec2-a427e31a8776-kube-api-access-lklz6\") pod \"collect-profiles-29399580-wbvw2\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:00 crc kubenswrapper[4691]: I1124 09:00:00.531847 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:01 crc kubenswrapper[4691]: I1124 09:00:01.010722 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2"] Nov 24 09:00:01 crc kubenswrapper[4691]: I1124 09:00:01.347733 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" event={"ID":"57261fdb-23d0-4aa2-bec2-a427e31a8776","Type":"ContainerStarted","Data":"309deda882e6ea513264939b88e708b3e9d1522b362f1940f225fb66ef3843d4"} Nov 24 09:00:01 crc kubenswrapper[4691]: I1124 09:00:01.347795 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" event={"ID":"57261fdb-23d0-4aa2-bec2-a427e31a8776","Type":"ContainerStarted","Data":"3508d51eb1a7f82d9a3935181f0c22f8878c50e17d99f6a311dcb8f815b3cb33"} Nov 24 09:00:01 crc kubenswrapper[4691]: I1124 09:00:01.370978 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" podStartSLOduration=1.370952825 podStartE2EDuration="1.370952825s" podCreationTimestamp="2025-11-24 09:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:00:01.363130403 +0000 UTC m=+3763.362079662" watchObservedRunningTime="2025-11-24 09:00:01.370952825 +0000 UTC m=+3763.369902074" Nov 24 09:00:02 crc kubenswrapper[4691]: I1124 09:00:02.359361 4691 generic.go:334] "Generic (PLEG): container finished" podID="57261fdb-23d0-4aa2-bec2-a427e31a8776" containerID="309deda882e6ea513264939b88e708b3e9d1522b362f1940f225fb66ef3843d4" exitCode=0 Nov 24 09:00:02 crc kubenswrapper[4691]: I1124 09:00:02.359535 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" event={"ID":"57261fdb-23d0-4aa2-bec2-a427e31a8776","Type":"ContainerDied","Data":"309deda882e6ea513264939b88e708b3e9d1522b362f1940f225fb66ef3843d4"} Nov 24 09:00:03 crc kubenswrapper[4691]: I1124 09:00:03.813283 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:03 crc kubenswrapper[4691]: I1124 09:00:03.930150 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lklz6\" (UniqueName: \"kubernetes.io/projected/57261fdb-23d0-4aa2-bec2-a427e31a8776-kube-api-access-lklz6\") pod \"57261fdb-23d0-4aa2-bec2-a427e31a8776\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " Nov 24 09:00:03 crc kubenswrapper[4691]: I1124 09:00:03.930478 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57261fdb-23d0-4aa2-bec2-a427e31a8776-secret-volume\") pod \"57261fdb-23d0-4aa2-bec2-a427e31a8776\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " Nov 24 09:00:03 crc kubenswrapper[4691]: I1124 09:00:03.930760 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57261fdb-23d0-4aa2-bec2-a427e31a8776-config-volume\") pod \"57261fdb-23d0-4aa2-bec2-a427e31a8776\" (UID: \"57261fdb-23d0-4aa2-bec2-a427e31a8776\") " Nov 24 09:00:03 crc kubenswrapper[4691]: I1124 09:00:03.933935 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57261fdb-23d0-4aa2-bec2-a427e31a8776-config-volume" (OuterVolumeSpecName: "config-volume") pod "57261fdb-23d0-4aa2-bec2-a427e31a8776" (UID: "57261fdb-23d0-4aa2-bec2-a427e31a8776"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 09:00:03 crc kubenswrapper[4691]: I1124 09:00:03.942762 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57261fdb-23d0-4aa2-bec2-a427e31a8776-kube-api-access-lklz6" (OuterVolumeSpecName: "kube-api-access-lklz6") pod "57261fdb-23d0-4aa2-bec2-a427e31a8776" (UID: "57261fdb-23d0-4aa2-bec2-a427e31a8776"). InnerVolumeSpecName "kube-api-access-lklz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:00:03 crc kubenswrapper[4691]: I1124 09:00:03.944715 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57261fdb-23d0-4aa2-bec2-a427e31a8776-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "57261fdb-23d0-4aa2-bec2-a427e31a8776" (UID: "57261fdb-23d0-4aa2-bec2-a427e31a8776"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 09:00:04 crc kubenswrapper[4691]: I1124 09:00:04.033362 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lklz6\" (UniqueName: \"kubernetes.io/projected/57261fdb-23d0-4aa2-bec2-a427e31a8776-kube-api-access-lklz6\") on node \"crc\" DevicePath \"\"" Nov 24 09:00:04 crc kubenswrapper[4691]: I1124 09:00:04.033400 4691 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57261fdb-23d0-4aa2-bec2-a427e31a8776-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 09:00:04 crc kubenswrapper[4691]: I1124 09:00:04.033410 4691 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57261fdb-23d0-4aa2-bec2-a427e31a8776-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 09:00:04 crc kubenswrapper[4691]: I1124 09:00:04.393908 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" event={"ID":"57261fdb-23d0-4aa2-bec2-a427e31a8776","Type":"ContainerDied","Data":"3508d51eb1a7f82d9a3935181f0c22f8878c50e17d99f6a311dcb8f815b3cb33"} Nov 24 09:00:04 crc kubenswrapper[4691]: I1124 09:00:04.393951 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3508d51eb1a7f82d9a3935181f0c22f8878c50e17d99f6a311dcb8f815b3cb33" Nov 24 09:00:04 crc kubenswrapper[4691]: I1124 09:00:04.394005 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399580-wbvw2" Nov 24 09:00:04 crc kubenswrapper[4691]: I1124 09:00:04.448881 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g"] Nov 24 09:00:04 crc kubenswrapper[4691]: I1124 09:00:04.456839 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399535-2ql5g"] Nov 24 09:00:04 crc kubenswrapper[4691]: I1124 09:00:04.774294 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6d9f7af-713b-45b1-89b3-2c82272dc6f5" path="/var/lib/kubelet/pods/d6d9f7af-713b-45b1-89b3-2c82272dc6f5/volumes" Nov 24 09:00:12 crc kubenswrapper[4691]: I1124 09:00:12.761072 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:00:12 crc kubenswrapper[4691]: E1124 09:00:12.762263 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:00:24 crc kubenswrapper[4691]: I1124 09:00:24.263621 4691 scope.go:117] "RemoveContainer" containerID="b43f17fe1c2741de46fb7ee9e927de00f56dfe317eaca8ccb3d958f86d04ca92" Nov 24 09:00:24 crc kubenswrapper[4691]: I1124 09:00:24.301187 4691 scope.go:117] "RemoveContainer" containerID="f894ee2fc0f1ff2443b16fa9340aa6b93e2cdb10eb79f136406242765f8aa702" Nov 24 09:00:24 crc kubenswrapper[4691]: I1124 09:00:24.346317 4691 scope.go:117] "RemoveContainer" containerID="6ed5ec908e95f711549c6b09eedd7d9d9c2c1be131aa6dc077fcec502198e943" Nov 24 09:00:24 crc kubenswrapper[4691]: I1124 09:00:24.408786 
4691 scope.go:117] "RemoveContainer" containerID="29ec6b3b9920f7a0874cae6941d0d5813d0d341c625e814f84b86cb3714ce5c8" Nov 24 09:00:27 crc kubenswrapper[4691]: I1124 09:00:27.760778 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:00:27 crc kubenswrapper[4691]: E1124 09:00:27.761890 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:00:38 crc kubenswrapper[4691]: I1124 09:00:38.766869 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:00:38 crc kubenswrapper[4691]: E1124 09:00:38.768166 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:00:50 crc kubenswrapper[4691]: I1124 09:00:50.761476 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:00:50 crc kubenswrapper[4691]: E1124 09:00:50.762369 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.267795 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xg6jq"] Nov 24 09:00:59 crc kubenswrapper[4691]: E1124 09:00:59.268935 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57261fdb-23d0-4aa2-bec2-a427e31a8776" containerName="collect-profiles" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.268953 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="57261fdb-23d0-4aa2-bec2-a427e31a8776" containerName="collect-profiles" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.269152 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="57261fdb-23d0-4aa2-bec2-a427e31a8776" containerName="collect-profiles" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.271177 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.289798 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xg6jq"] Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.421114 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-utilities\") pod \"redhat-operators-xg6jq\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.421235 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dn7wg\" (UniqueName: \"kubernetes.io/projected/ba33dce5-90b4-45ad-a9cb-f660871415f2-kube-api-access-dn7wg\") pod \"redhat-operators-xg6jq\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.421310 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-catalog-content\") pod \"redhat-operators-xg6jq\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.523405 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dn7wg\" (UniqueName: \"kubernetes.io/projected/ba33dce5-90b4-45ad-a9cb-f660871415f2-kube-api-access-dn7wg\") pod \"redhat-operators-xg6jq\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.523518 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-catalog-content\") pod \"redhat-operators-xg6jq\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.523634 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-utilities\") pod \"redhat-operators-xg6jq\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.524159 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-catalog-content\") pod \"redhat-operators-xg6jq\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.524205 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-utilities\") pod \"redhat-operators-xg6jq\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.542853 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-dn7wg\" (UniqueName: \"kubernetes.io/projected/ba33dce5-90b4-45ad-a9cb-f660871415f2-kube-api-access-dn7wg\") pod \"redhat-operators-xg6jq\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:00:59 crc kubenswrapper[4691]: I1124 09:00:59.641148 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.101259 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xg6jq"] Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.164843 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29399581-wrkt4"] Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.173629 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29399581-wrkt4" Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.181048 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29399581-wrkt4"] Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.340402 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-combined-ca-bundle\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4" Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.340491 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-fernet-keys\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4" Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.340573 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn9k9\" (UniqueName: \"kubernetes.io/projected/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-kube-api-access-fn9k9\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4" Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.340598 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-config-data\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4" Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.442123 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-combined-ca-bundle\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4" Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.442182 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-fernet-keys\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4" Nov 24 09:01:00 crc 
Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.442242 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn9k9\" (UniqueName: \"kubernetes.io/projected/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-kube-api-access-fn9k9\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4"
Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.442260 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-config-data\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4"
Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.450861 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-combined-ca-bundle\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4"
Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.450923 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-fernet-keys\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4"
Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.451213 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-config-data\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4"
Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.468267 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn9k9\" (UniqueName: \"kubernetes.io/projected/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-kube-api-access-fn9k9\") pod \"keystone-cron-29399581-wrkt4\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " pod="openstack/keystone-cron-29399581-wrkt4"
Need to start a new one" pod="openstack/keystone-cron-29399581-wrkt4" Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.938366 4691 generic.go:334] "Generic (PLEG): container finished" podID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerID="3534a01fa026bab5fb17204e160c147f1d20c97b853d8c5c9d96189fbc58526f" exitCode=0 Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.938569 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xg6jq" event={"ID":"ba33dce5-90b4-45ad-a9cb-f660871415f2","Type":"ContainerDied","Data":"3534a01fa026bab5fb17204e160c147f1d20c97b853d8c5c9d96189fbc58526f"} Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.938764 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xg6jq" event={"ID":"ba33dce5-90b4-45ad-a9cb-f660871415f2","Type":"ContainerStarted","Data":"b8bd8822c830b384e95254856c8390eeee92e414246aabb9746c3c6357794bee"} Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.941285 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 09:01:00 crc kubenswrapper[4691]: W1124 09:01:00.992226 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde9d27b1_63c1_4cc9_9bd6_9d015c3122cf.slice/crio-d4b550c2f9326761132e2ae8ac4ef7f2d1ae7e1f508421c7457c646a3bc63d62 WatchSource:0}: Error finding container d4b550c2f9326761132e2ae8ac4ef7f2d1ae7e1f508421c7457c646a3bc63d62: Status 404 returned error can't find the container with id d4b550c2f9326761132e2ae8ac4ef7f2d1ae7e1f508421c7457c646a3bc63d62 Nov 24 09:01:00 crc kubenswrapper[4691]: I1124 09:01:00.998354 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29399581-wrkt4"] Nov 24 09:01:01 crc kubenswrapper[4691]: I1124 09:01:01.760987 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:01:01 crc kubenswrapper[4691]: E1124 09:01:01.761655 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:01:01 crc kubenswrapper[4691]: I1124 09:01:01.951811 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399581-wrkt4" event={"ID":"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf","Type":"ContainerStarted","Data":"c7dc93adccbac171be1a32f906ddeec6bd4ef6cb85285a2d55362fb5626dbef7"} Nov 24 09:01:01 crc kubenswrapper[4691]: I1124 09:01:01.951876 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399581-wrkt4" event={"ID":"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf","Type":"ContainerStarted","Data":"d4b550c2f9326761132e2ae8ac4ef7f2d1ae7e1f508421c7457c646a3bc63d62"} Nov 24 09:01:02 crc kubenswrapper[4691]: I1124 09:01:02.966540 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xg6jq" event={"ID":"ba33dce5-90b4-45ad-a9cb-f660871415f2","Type":"ContainerStarted","Data":"2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c"} Nov 24 09:01:02 crc kubenswrapper[4691]: I1124 09:01:02.990059 4691 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29399581-wrkt4" podStartSLOduration=2.990036989 podStartE2EDuration="2.990036989s" podCreationTimestamp="2025-11-24 09:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:01:01.977841067 +0000 UTC m=+3823.976790326" watchObservedRunningTime="2025-11-24 09:01:02.990036989 +0000 UTC m=+3824.988986228" Nov 24 09:01:03 crc kubenswrapper[4691]: I1124 09:01:03.974856 4691 generic.go:334] "Generic (PLEG): container finished" podID="de9d27b1-63c1-4cc9-9bd6-9d015c3122cf" containerID="c7dc93adccbac171be1a32f906ddeec6bd4ef6cb85285a2d55362fb5626dbef7" exitCode=0 Nov 24 09:01:03 crc kubenswrapper[4691]: I1124 09:01:03.974956 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399581-wrkt4" event={"ID":"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf","Type":"ContainerDied","Data":"c7dc93adccbac171be1a32f906ddeec6bd4ef6cb85285a2d55362fb5626dbef7"} Nov 24 09:01:03 crc kubenswrapper[4691]: I1124 09:01:03.977852 4691 generic.go:334] "Generic (PLEG): container finished" podID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerID="2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c" exitCode=0 Nov 24 09:01:03 crc kubenswrapper[4691]: I1124 09:01:03.977895 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xg6jq" event={"ID":"ba33dce5-90b4-45ad-a9cb-f660871415f2","Type":"ContainerDied","Data":"2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c"} Nov 24 09:01:04 crc kubenswrapper[4691]: I1124 09:01:04.989762 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xg6jq" event={"ID":"ba33dce5-90b4-45ad-a9cb-f660871415f2","Type":"ContainerStarted","Data":"b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8"} Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.022499 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xg6jq" podStartSLOduration=2.45471068 podStartE2EDuration="6.022440651s" podCreationTimestamp="2025-11-24 09:00:59 +0000 UTC" firstStartedPulling="2025-11-24 09:01:00.941068259 +0000 UTC m=+3822.940017508" lastFinishedPulling="2025-11-24 09:01:04.50879823 +0000 UTC m=+3826.507747479" observedRunningTime="2025-11-24 09:01:05.02020544 +0000 UTC m=+3827.019154699" watchObservedRunningTime="2025-11-24 09:01:05.022440651 +0000 UTC m=+3827.021389900" Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.407590 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29399581-wrkt4" Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.410135 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-fernet-keys\") pod \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.410193 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fn9k9\" (UniqueName: \"kubernetes.io/projected/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-kube-api-access-fn9k9\") pod \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.410222 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-config-data\") pod \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.410249 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-combined-ca-bundle\") pod \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\" (UID: \"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf\") " Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.418130 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "de9d27b1-63c1-4cc9-9bd6-9d015c3122cf" (UID: "de9d27b1-63c1-4cc9-9bd6-9d015c3122cf"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.418782 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-kube-api-access-fn9k9" (OuterVolumeSpecName: "kube-api-access-fn9k9") pod "de9d27b1-63c1-4cc9-9bd6-9d015c3122cf" (UID: "de9d27b1-63c1-4cc9-9bd6-9d015c3122cf"). InnerVolumeSpecName "kube-api-access-fn9k9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.460645 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de9d27b1-63c1-4cc9-9bd6-9d015c3122cf" (UID: "de9d27b1-63c1-4cc9-9bd6-9d015c3122cf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.488777 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-config-data" (OuterVolumeSpecName: "config-data") pod "de9d27b1-63c1-4cc9-9bd6-9d015c3122cf" (UID: "de9d27b1-63c1-4cc9-9bd6-9d015c3122cf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.512079 4691 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.512124 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fn9k9\" (UniqueName: \"kubernetes.io/projected/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-kube-api-access-fn9k9\") on node \"crc\" DevicePath \"\"" Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.512139 4691 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-config-data\") on node \"crc\" DevicePath \"\"" Nov 24 09:01:05 crc kubenswrapper[4691]: I1124 09:01:05.512150 4691 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de9d27b1-63c1-4cc9-9bd6-9d015c3122cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 24 09:01:06 crc kubenswrapper[4691]: I1124 09:01:06.000903 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29399581-wrkt4" event={"ID":"de9d27b1-63c1-4cc9-9bd6-9d015c3122cf","Type":"ContainerDied","Data":"d4b550c2f9326761132e2ae8ac4ef7f2d1ae7e1f508421c7457c646a3bc63d62"} Nov 24 09:01:06 crc kubenswrapper[4691]: I1124 09:01:06.000955 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4b550c2f9326761132e2ae8ac4ef7f2d1ae7e1f508421c7457c646a3bc63d62" Nov 24 09:01:06 crc kubenswrapper[4691]: I1124 09:01:06.000919 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29399581-wrkt4" Nov 24 09:01:09 crc kubenswrapper[4691]: I1124 09:01:09.641924 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:01:09 crc kubenswrapper[4691]: I1124 09:01:09.642484 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:01:10 crc kubenswrapper[4691]: I1124 09:01:10.690418 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xg6jq" podUID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerName="registry-server" probeResult="failure" output=< Nov 24 09:01:10 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 09:01:10 crc kubenswrapper[4691]: > Nov 24 09:01:12 crc kubenswrapper[4691]: I1124 09:01:12.761042 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:01:12 crc kubenswrapper[4691]: E1124 09:01:12.761690 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:01:19 crc kubenswrapper[4691]: I1124 09:01:19.692538 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:01:19 crc kubenswrapper[4691]: I1124 
09:01:19.739982 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:01:19 crc kubenswrapper[4691]: I1124 09:01:19.929372 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xg6jq"] Nov 24 09:01:21 crc kubenswrapper[4691]: I1124 09:01:21.135346 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xg6jq" podUID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerName="registry-server" containerID="cri-o://b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8" gracePeriod=2 Nov 24 09:01:21 crc kubenswrapper[4691]: I1124 09:01:21.718237 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:01:21 crc kubenswrapper[4691]: I1124 09:01:21.918821 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-utilities\") pod \"ba33dce5-90b4-45ad-a9cb-f660871415f2\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " Nov 24 09:01:21 crc kubenswrapper[4691]: I1124 09:01:21.919594 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-catalog-content\") pod \"ba33dce5-90b4-45ad-a9cb-f660871415f2\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " Nov 24 09:01:21 crc kubenswrapper[4691]: I1124 09:01:21.919679 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dn7wg\" (UniqueName: \"kubernetes.io/projected/ba33dce5-90b4-45ad-a9cb-f660871415f2-kube-api-access-dn7wg\") pod \"ba33dce5-90b4-45ad-a9cb-f660871415f2\" (UID: \"ba33dce5-90b4-45ad-a9cb-f660871415f2\") " Nov 24 09:01:21 crc kubenswrapper[4691]: I1124 09:01:21.920174 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-utilities" (OuterVolumeSpecName: "utilities") pod "ba33dce5-90b4-45ad-a9cb-f660871415f2" (UID: "ba33dce5-90b4-45ad-a9cb-f660871415f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:01:21 crc kubenswrapper[4691]: I1124 09:01:21.921298 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:01:21 crc kubenswrapper[4691]: I1124 09:01:21.930227 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba33dce5-90b4-45ad-a9cb-f660871415f2-kube-api-access-dn7wg" (OuterVolumeSpecName: "kube-api-access-dn7wg") pod "ba33dce5-90b4-45ad-a9cb-f660871415f2" (UID: "ba33dce5-90b4-45ad-a9cb-f660871415f2"). InnerVolumeSpecName "kube-api-access-dn7wg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.014341 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ba33dce5-90b4-45ad-a9cb-f660871415f2" (UID: "ba33dce5-90b4-45ad-a9cb-f660871415f2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.023810 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba33dce5-90b4-45ad-a9cb-f660871415f2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.024091 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dn7wg\" (UniqueName: \"kubernetes.io/projected/ba33dce5-90b4-45ad-a9cb-f660871415f2-kube-api-access-dn7wg\") on node \"crc\" DevicePath \"\"" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.147017 4691 generic.go:334] "Generic (PLEG): container finished" podID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerID="b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8" exitCode=0 Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.147059 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xg6jq" event={"ID":"ba33dce5-90b4-45ad-a9cb-f660871415f2","Type":"ContainerDied","Data":"b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8"} Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.147073 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xg6jq" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.147091 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xg6jq" event={"ID":"ba33dce5-90b4-45ad-a9cb-f660871415f2","Type":"ContainerDied","Data":"b8bd8822c830b384e95254856c8390eeee92e414246aabb9746c3c6357794bee"} Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.147109 4691 scope.go:117] "RemoveContainer" containerID="b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.182663 4691 scope.go:117] "RemoveContainer" containerID="2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.189571 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xg6jq"] Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.199222 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xg6jq"] Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.212647 4691 scope.go:117] "RemoveContainer" containerID="3534a01fa026bab5fb17204e160c147f1d20c97b853d8c5c9d96189fbc58526f" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.278762 4691 scope.go:117] "RemoveContainer" containerID="b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8" Nov 24 09:01:22 crc kubenswrapper[4691]: E1124 09:01:22.279370 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8\": container with ID starting with b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8 not found: ID does not exist" containerID="b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.279503 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8"} err="failed to get container status \"b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8\": 
rpc error: code = NotFound desc = could not find container \"b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8\": container with ID starting with b4508fb7bd5447ba1314562f954c66ee663f439a3d1fc328cf7ba1cf604395c8 not found: ID does not exist" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.279663 4691 scope.go:117] "RemoveContainer" containerID="2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c" Nov 24 09:01:22 crc kubenswrapper[4691]: E1124 09:01:22.280155 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c\": container with ID starting with 2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c not found: ID does not exist" containerID="2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.280179 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c"} err="failed to get container status \"2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c\": rpc error: code = NotFound desc = could not find container \"2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c\": container with ID starting with 2f39379eb1a1d3e4ea6d96c1615769f4ec4cf901afe5c97fa83adfceabf7e32c not found: ID does not exist" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.280192 4691 scope.go:117] "RemoveContainer" containerID="3534a01fa026bab5fb17204e160c147f1d20c97b853d8c5c9d96189fbc58526f" Nov 24 09:01:22 crc kubenswrapper[4691]: E1124 09:01:22.280532 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3534a01fa026bab5fb17204e160c147f1d20c97b853d8c5c9d96189fbc58526f\": container with ID starting with 3534a01fa026bab5fb17204e160c147f1d20c97b853d8c5c9d96189fbc58526f not found: ID does not exist" containerID="3534a01fa026bab5fb17204e160c147f1d20c97b853d8c5c9d96189fbc58526f" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.280599 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3534a01fa026bab5fb17204e160c147f1d20c97b853d8c5c9d96189fbc58526f"} err="failed to get container status \"3534a01fa026bab5fb17204e160c147f1d20c97b853d8c5c9d96189fbc58526f\": rpc error: code = NotFound desc = could not find container \"3534a01fa026bab5fb17204e160c147f1d20c97b853d8c5c9d96189fbc58526f\": container with ID starting with 3534a01fa026bab5fb17204e160c147f1d20c97b853d8c5c9d96189fbc58526f not found: ID does not exist" Nov 24 09:01:22 crc kubenswrapper[4691]: I1124 09:01:22.773049 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba33dce5-90b4-45ad-a9cb-f660871415f2" path="/var/lib/kubelet/pods/ba33dce5-90b4-45ad-a9cb-f660871415f2/volumes" Nov 24 09:01:27 crc kubenswrapper[4691]: I1124 09:01:27.761222 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:01:27 crc kubenswrapper[4691]: E1124 09:01:27.762051 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:01:41 crc kubenswrapper[4691]: I1124 09:01:41.761348 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:01:41 crc kubenswrapper[4691]: E1124 09:01:41.763342 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:01:54 crc kubenswrapper[4691]: I1124 09:01:54.761437 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:01:54 crc kubenswrapper[4691]: E1124 09:01:54.762292 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:02:06 crc kubenswrapper[4691]: I1124 09:02:06.761273 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:02:06 crc kubenswrapper[4691]: E1124 09:02:06.762141 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:02:18 crc kubenswrapper[4691]: I1124 09:02:18.771952 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:02:18 crc kubenswrapper[4691]: E1124 09:02:18.773364 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:02:31 crc kubenswrapper[4691]: I1124 09:02:31.761820 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:02:32 crc kubenswrapper[4691]: I1124 09:02:32.756679 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"c89a895b1442fb66dae9bce4acd58e58a614f26478fea75325d9956aadd18bb2"} Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.660570 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sswd9"] Nov 24 09:02:55 crc kubenswrapper[4691]: E1124 09:02:55.661618 
4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerName="registry-server" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.661633 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerName="registry-server" Nov 24 09:02:55 crc kubenswrapper[4691]: E1124 09:02:55.661657 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de9d27b1-63c1-4cc9-9bd6-9d015c3122cf" containerName="keystone-cron" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.661665 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="de9d27b1-63c1-4cc9-9bd6-9d015c3122cf" containerName="keystone-cron" Nov 24 09:02:55 crc kubenswrapper[4691]: E1124 09:02:55.661691 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerName="extract-content" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.661696 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerName="extract-content" Nov 24 09:02:55 crc kubenswrapper[4691]: E1124 09:02:55.661713 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerName="extract-utilities" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.661718 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerName="extract-utilities" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.661904 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="de9d27b1-63c1-4cc9-9bd6-9d015c3122cf" containerName="keystone-cron" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.661930 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba33dce5-90b4-45ad-a9cb-f660871415f2" containerName="registry-server" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.663298 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.702189 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sswd9"] Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.778793 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-catalog-content\") pod \"community-operators-sswd9\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.778864 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-utilities\") pod \"community-operators-sswd9\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.778967 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlwrf\" (UniqueName: \"kubernetes.io/projected/00cce186-6d61-46a7-9761-4c6180938401-kube-api-access-rlwrf\") pod \"community-operators-sswd9\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.881313 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlwrf\" (UniqueName: \"kubernetes.io/projected/00cce186-6d61-46a7-9761-4c6180938401-kube-api-access-rlwrf\") pod \"community-operators-sswd9\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.881871 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-catalog-content\") pod \"community-operators-sswd9\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.881898 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-utilities\") pod \"community-operators-sswd9\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.882395 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-utilities\") pod \"community-operators-sswd9\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.882809 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-catalog-content\") pod \"community-operators-sswd9\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.911773 4691 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rlwrf\" (UniqueName: \"kubernetes.io/projected/00cce186-6d61-46a7-9761-4c6180938401-kube-api-access-rlwrf\") pod \"community-operators-sswd9\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:02:55 crc kubenswrapper[4691]: I1124 09:02:55.991308 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:02:56 crc kubenswrapper[4691]: I1124 09:02:56.549722 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sswd9"] Nov 24 09:02:56 crc kubenswrapper[4691]: I1124 09:02:56.965142 4691 generic.go:334] "Generic (PLEG): container finished" podID="00cce186-6d61-46a7-9761-4c6180938401" containerID="c8c471dd4e190d29f49df9f9a227a29f0e30fa61f5cc9a3f9cf77e61e64192ab" exitCode=0 Nov 24 09:02:56 crc kubenswrapper[4691]: I1124 09:02:56.965265 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sswd9" event={"ID":"00cce186-6d61-46a7-9761-4c6180938401","Type":"ContainerDied","Data":"c8c471dd4e190d29f49df9f9a227a29f0e30fa61f5cc9a3f9cf77e61e64192ab"} Nov 24 09:02:56 crc kubenswrapper[4691]: I1124 09:02:56.965950 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sswd9" event={"ID":"00cce186-6d61-46a7-9761-4c6180938401","Type":"ContainerStarted","Data":"a7936b1afbcb12c5439a283e9a89db4eb372d2b446a0890e8892bcfaebdeb68c"} Nov 24 09:02:57 crc kubenswrapper[4691]: I1124 09:02:57.978067 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sswd9" event={"ID":"00cce186-6d61-46a7-9761-4c6180938401","Type":"ContainerStarted","Data":"6cb3d1859d0dc70766ffcefa38fe633c7c8de5851d1cf0860cf198ce46dd0705"} Nov 24 09:02:58 crc kubenswrapper[4691]: I1124 09:02:58.987718 4691 generic.go:334] "Generic (PLEG): container finished" podID="00cce186-6d61-46a7-9761-4c6180938401" containerID="6cb3d1859d0dc70766ffcefa38fe633c7c8de5851d1cf0860cf198ce46dd0705" exitCode=0 Nov 24 09:02:58 crc kubenswrapper[4691]: I1124 09:02:58.987769 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sswd9" event={"ID":"00cce186-6d61-46a7-9761-4c6180938401","Type":"ContainerDied","Data":"6cb3d1859d0dc70766ffcefa38fe633c7c8de5851d1cf0860cf198ce46dd0705"} Nov 24 09:03:01 crc kubenswrapper[4691]: I1124 09:03:01.008407 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sswd9" event={"ID":"00cce186-6d61-46a7-9761-4c6180938401","Type":"ContainerStarted","Data":"53c2951efff93b253f714417dfcfa9eec3f2b6d373592141542f83b8f20ddff6"} Nov 24 09:03:01 crc kubenswrapper[4691]: I1124 09:03:01.027959 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sswd9" podStartSLOduration=3.588472505 podStartE2EDuration="6.027938535s" podCreationTimestamp="2025-11-24 09:02:55 +0000 UTC" firstStartedPulling="2025-11-24 09:02:56.967697802 +0000 UTC m=+3938.966647041" lastFinishedPulling="2025-11-24 09:02:59.407163832 +0000 UTC m=+3941.406113071" observedRunningTime="2025-11-24 09:03:01.026979368 +0000 UTC m=+3943.025928637" watchObservedRunningTime="2025-11-24 09:03:01.027938535 +0000 UTC m=+3943.026887784" Nov 24 09:03:05 crc kubenswrapper[4691]: I1124 09:03:05.992204 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:03:05 crc kubenswrapper[4691]: I1124 09:03:05.993143 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:03:06 crc kubenswrapper[4691]: I1124 09:03:06.034890 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:03:06 crc kubenswrapper[4691]: I1124 09:03:06.111181 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:03:06 crc kubenswrapper[4691]: I1124 09:03:06.271746 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sswd9"] Nov 24 09:03:08 crc kubenswrapper[4691]: I1124 09:03:08.067156 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sswd9" podUID="00cce186-6d61-46a7-9761-4c6180938401" containerName="registry-server" containerID="cri-o://53c2951efff93b253f714417dfcfa9eec3f2b6d373592141542f83b8f20ddff6" gracePeriod=2 Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.081250 4691 generic.go:334] "Generic (PLEG): container finished" podID="00cce186-6d61-46a7-9761-4c6180938401" containerID="53c2951efff93b253f714417dfcfa9eec3f2b6d373592141542f83b8f20ddff6" exitCode=0 Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.081371 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sswd9" event={"ID":"00cce186-6d61-46a7-9761-4c6180938401","Type":"ContainerDied","Data":"53c2951efff93b253f714417dfcfa9eec3f2b6d373592141542f83b8f20ddff6"} Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.082348 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sswd9" event={"ID":"00cce186-6d61-46a7-9761-4c6180938401","Type":"ContainerDied","Data":"a7936b1afbcb12c5439a283e9a89db4eb372d2b446a0890e8892bcfaebdeb68c"} Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.082375 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7936b1afbcb12c5439a283e9a89db4eb372d2b446a0890e8892bcfaebdeb68c" Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.085359 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.152574 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-catalog-content\") pod \"00cce186-6d61-46a7-9761-4c6180938401\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.152694 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlwrf\" (UniqueName: \"kubernetes.io/projected/00cce186-6d61-46a7-9761-4c6180938401-kube-api-access-rlwrf\") pod \"00cce186-6d61-46a7-9761-4c6180938401\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.152788 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-utilities\") pod \"00cce186-6d61-46a7-9761-4c6180938401\" (UID: \"00cce186-6d61-46a7-9761-4c6180938401\") " Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.153782 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-utilities" (OuterVolumeSpecName: "utilities") pod "00cce186-6d61-46a7-9761-4c6180938401" (UID: "00cce186-6d61-46a7-9761-4c6180938401"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.173408 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00cce186-6d61-46a7-9761-4c6180938401-kube-api-access-rlwrf" (OuterVolumeSpecName: "kube-api-access-rlwrf") pod "00cce186-6d61-46a7-9761-4c6180938401" (UID: "00cce186-6d61-46a7-9761-4c6180938401"). InnerVolumeSpecName "kube-api-access-rlwrf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.220281 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "00cce186-6d61-46a7-9761-4c6180938401" (UID: "00cce186-6d61-46a7-9761-4c6180938401"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.256616 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.256733 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlwrf\" (UniqueName: \"kubernetes.io/projected/00cce186-6d61-46a7-9761-4c6180938401-kube-api-access-rlwrf\") on node \"crc\" DevicePath \"\"" Nov 24 09:03:09 crc kubenswrapper[4691]: I1124 09:03:09.256751 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00cce186-6d61-46a7-9761-4c6180938401-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:03:10 crc kubenswrapper[4691]: I1124 09:03:10.091993 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sswd9" Nov 24 09:03:10 crc kubenswrapper[4691]: I1124 09:03:10.126964 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sswd9"] Nov 24 09:03:10 crc kubenswrapper[4691]: I1124 09:03:10.134617 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sswd9"] Nov 24 09:03:10 crc kubenswrapper[4691]: I1124 09:03:10.773482 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00cce186-6d61-46a7-9761-4c6180938401" path="/var/lib/kubelet/pods/00cce186-6d61-46a7-9761-4c6180938401/volumes" Nov 24 09:03:44 crc kubenswrapper[4691]: I1124 09:03:44.919537 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qnbnr"] Nov 24 09:03:44 crc kubenswrapper[4691]: E1124 09:03:44.920546 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00cce186-6d61-46a7-9761-4c6180938401" containerName="extract-utilities" Nov 24 09:03:44 crc kubenswrapper[4691]: I1124 09:03:44.920562 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="00cce186-6d61-46a7-9761-4c6180938401" containerName="extract-utilities" Nov 24 09:03:44 crc kubenswrapper[4691]: E1124 09:03:44.920577 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00cce186-6d61-46a7-9761-4c6180938401" containerName="registry-server" Nov 24 09:03:44 crc kubenswrapper[4691]: I1124 09:03:44.920584 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="00cce186-6d61-46a7-9761-4c6180938401" containerName="registry-server" Nov 24 09:03:44 crc kubenswrapper[4691]: E1124 09:03:44.920608 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00cce186-6d61-46a7-9761-4c6180938401" containerName="extract-content" Nov 24 09:03:44 crc kubenswrapper[4691]: I1124 09:03:44.920616 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="00cce186-6d61-46a7-9761-4c6180938401" containerName="extract-content" Nov 24 09:03:44 crc kubenswrapper[4691]: I1124 09:03:44.920892 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="00cce186-6d61-46a7-9761-4c6180938401" containerName="registry-server" Nov 24 09:03:44 crc kubenswrapper[4691]: I1124 09:03:44.924476 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:44 crc kubenswrapper[4691]: I1124 09:03:44.946053 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qnbnr"] Nov 24 09:03:44 crc kubenswrapper[4691]: I1124 09:03:44.968542 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-utilities\") pod \"certified-operators-qnbnr\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:44 crc kubenswrapper[4691]: I1124 09:03:44.968742 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbzcw\" (UniqueName: \"kubernetes.io/projected/b51c0214-d377-46b7-a95f-5d7eabba2646-kube-api-access-tbzcw\") pod \"certified-operators-qnbnr\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:44 crc kubenswrapper[4691]: I1124 09:03:44.968901 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-catalog-content\") pod \"certified-operators-qnbnr\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:45 crc kubenswrapper[4691]: I1124 09:03:45.069799 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbzcw\" (UniqueName: \"kubernetes.io/projected/b51c0214-d377-46b7-a95f-5d7eabba2646-kube-api-access-tbzcw\") pod \"certified-operators-qnbnr\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:45 crc kubenswrapper[4691]: I1124 09:03:45.069910 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-catalog-content\") pod \"certified-operators-qnbnr\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:45 crc kubenswrapper[4691]: I1124 09:03:45.069969 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-utilities\") pod \"certified-operators-qnbnr\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:45 crc kubenswrapper[4691]: I1124 09:03:45.070404 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-utilities\") pod \"certified-operators-qnbnr\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:45 crc kubenswrapper[4691]: I1124 09:03:45.070638 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-catalog-content\") pod \"certified-operators-qnbnr\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:45 crc kubenswrapper[4691]: I1124 09:03:45.459417 4691 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tbzcw\" (UniqueName: \"kubernetes.io/projected/b51c0214-d377-46b7-a95f-5d7eabba2646-kube-api-access-tbzcw\") pod \"certified-operators-qnbnr\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:45 crc kubenswrapper[4691]: I1124 09:03:45.554306 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:46 crc kubenswrapper[4691]: I1124 09:03:46.085643 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qnbnr"] Nov 24 09:03:46 crc kubenswrapper[4691]: I1124 09:03:46.414772 4691 generic.go:334] "Generic (PLEG): container finished" podID="b51c0214-d377-46b7-a95f-5d7eabba2646" containerID="301b43454952d929a081865e0911277815e3fc0e9404fae92897fa7dab44c3f7" exitCode=0 Nov 24 09:03:46 crc kubenswrapper[4691]: I1124 09:03:46.414824 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbnr" event={"ID":"b51c0214-d377-46b7-a95f-5d7eabba2646","Type":"ContainerDied","Data":"301b43454952d929a081865e0911277815e3fc0e9404fae92897fa7dab44c3f7"} Nov 24 09:03:46 crc kubenswrapper[4691]: I1124 09:03:46.414867 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbnr" event={"ID":"b51c0214-d377-46b7-a95f-5d7eabba2646","Type":"ContainerStarted","Data":"10147ec22464838e2aed35f9bb1ae429d3ca99fbb8ad37631934f1fc59019c4b"} Nov 24 09:03:47 crc kubenswrapper[4691]: I1124 09:03:47.425748 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbnr" event={"ID":"b51c0214-d377-46b7-a95f-5d7eabba2646","Type":"ContainerStarted","Data":"9492e0c86ade00d513c1bfd85f0ead423588414b52570e337adc75bf448a6cd8"} Nov 24 09:03:48 crc kubenswrapper[4691]: I1124 09:03:48.436354 4691 generic.go:334] "Generic (PLEG): container finished" podID="b51c0214-d377-46b7-a95f-5d7eabba2646" containerID="9492e0c86ade00d513c1bfd85f0ead423588414b52570e337adc75bf448a6cd8" exitCode=0 Nov 24 09:03:48 crc kubenswrapper[4691]: I1124 09:03:48.436465 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbnr" event={"ID":"b51c0214-d377-46b7-a95f-5d7eabba2646","Type":"ContainerDied","Data":"9492e0c86ade00d513c1bfd85f0ead423588414b52570e337adc75bf448a6cd8"} Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.447730 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbnr" event={"ID":"b51c0214-d377-46b7-a95f-5d7eabba2646","Type":"ContainerStarted","Data":"0022d0e32409c8dede3677ce67e0dbdb977bff75f2c90841c2073c73bd4fc48a"} Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.470727 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qnbnr" podStartSLOduration=2.6853196329999998 podStartE2EDuration="5.470709533s" podCreationTimestamp="2025-11-24 09:03:44 +0000 UTC" firstStartedPulling="2025-11-24 09:03:46.418149051 +0000 UTC m=+3988.417098300" lastFinishedPulling="2025-11-24 09:03:49.203538951 +0000 UTC m=+3991.202488200" observedRunningTime="2025-11-24 09:03:49.468338836 +0000 UTC m=+3991.467288095" watchObservedRunningTime="2025-11-24 09:03:49.470709533 +0000 UTC m=+3991.469658782" Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.508770 4691 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-marketplace-wwkgc"] Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.511055 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.520386 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wwkgc"] Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.657640 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-catalog-content\") pod \"redhat-marketplace-wwkgc\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") " pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.658639 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-utilities\") pod \"redhat-marketplace-wwkgc\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") " pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.658687 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpsf8\" (UniqueName: \"kubernetes.io/projected/602beef7-9916-4f88-ba4f-9d67122b91e8-kube-api-access-tpsf8\") pod \"redhat-marketplace-wwkgc\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") " pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.761767 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-catalog-content\") pod \"redhat-marketplace-wwkgc\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") " pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.761827 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-utilities\") pod \"redhat-marketplace-wwkgc\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") " pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.761885 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpsf8\" (UniqueName: \"kubernetes.io/projected/602beef7-9916-4f88-ba4f-9d67122b91e8-kube-api-access-tpsf8\") pod \"redhat-marketplace-wwkgc\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") " pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.762305 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-catalog-content\") pod \"redhat-marketplace-wwkgc\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") " pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.762346 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-utilities\") pod \"redhat-marketplace-wwkgc\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") 
" pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.786323 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpsf8\" (UniqueName: \"kubernetes.io/projected/602beef7-9916-4f88-ba4f-9d67122b91e8-kube-api-access-tpsf8\") pod \"redhat-marketplace-wwkgc\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") " pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:49 crc kubenswrapper[4691]: I1124 09:03:49.833874 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:50 crc kubenswrapper[4691]: I1124 09:03:50.328354 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wwkgc"] Nov 24 09:03:50 crc kubenswrapper[4691]: I1124 09:03:50.459077 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwkgc" event={"ID":"602beef7-9916-4f88-ba4f-9d67122b91e8","Type":"ContainerStarted","Data":"729521f763d5997d810791e9836d52c1cbd090425d98f8532deca7ae232d7398"} Nov 24 09:03:51 crc kubenswrapper[4691]: I1124 09:03:51.471906 4691 generic.go:334] "Generic (PLEG): container finished" podID="602beef7-9916-4f88-ba4f-9d67122b91e8" containerID="c77925fd20ec32d0c1f02196f805282dbd143962d30d801cb89cb9276c03e851" exitCode=0 Nov 24 09:03:51 crc kubenswrapper[4691]: I1124 09:03:51.471990 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwkgc" event={"ID":"602beef7-9916-4f88-ba4f-9d67122b91e8","Type":"ContainerDied","Data":"c77925fd20ec32d0c1f02196f805282dbd143962d30d801cb89cb9276c03e851"} Nov 24 09:03:53 crc kubenswrapper[4691]: I1124 09:03:53.490254 4691 generic.go:334] "Generic (PLEG): container finished" podID="602beef7-9916-4f88-ba4f-9d67122b91e8" containerID="da217be8ccd890355f50b8629e4982be7345293f40f204410989ceef2a3fa1da" exitCode=0 Nov 24 09:03:53 crc kubenswrapper[4691]: I1124 09:03:53.490355 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwkgc" event={"ID":"602beef7-9916-4f88-ba4f-9d67122b91e8","Type":"ContainerDied","Data":"da217be8ccd890355f50b8629e4982be7345293f40f204410989ceef2a3fa1da"} Nov 24 09:03:55 crc kubenswrapper[4691]: I1124 09:03:55.520428 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwkgc" event={"ID":"602beef7-9916-4f88-ba4f-9d67122b91e8","Type":"ContainerStarted","Data":"761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065"} Nov 24 09:03:55 crc kubenswrapper[4691]: I1124 09:03:55.546489 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wwkgc" podStartSLOduration=4.121871831 podStartE2EDuration="6.546447291s" podCreationTimestamp="2025-11-24 09:03:49 +0000 UTC" firstStartedPulling="2025-11-24 09:03:51.474153355 +0000 UTC m=+3993.473102604" lastFinishedPulling="2025-11-24 09:03:53.898728815 +0000 UTC m=+3995.897678064" observedRunningTime="2025-11-24 09:03:55.542420396 +0000 UTC m=+3997.541369655" watchObservedRunningTime="2025-11-24 09:03:55.546447291 +0000 UTC m=+3997.545396540" Nov 24 09:03:55 crc kubenswrapper[4691]: I1124 09:03:55.555500 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:55 crc kubenswrapper[4691]: I1124 09:03:55.555559 4691 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:55 crc kubenswrapper[4691]: I1124 09:03:55.608016 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:56 crc kubenswrapper[4691]: I1124 09:03:56.571839 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:56 crc kubenswrapper[4691]: I1124 09:03:56.902092 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qnbnr"] Nov 24 09:03:58 crc kubenswrapper[4691]: I1124 09:03:58.545745 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qnbnr" podUID="b51c0214-d377-46b7-a95f-5d7eabba2646" containerName="registry-server" containerID="cri-o://0022d0e32409c8dede3677ce67e0dbdb977bff75f2c90841c2073c73bd4fc48a" gracePeriod=2 Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.557053 4691 generic.go:334] "Generic (PLEG): container finished" podID="b51c0214-d377-46b7-a95f-5d7eabba2646" containerID="0022d0e32409c8dede3677ce67e0dbdb977bff75f2c90841c2073c73bd4fc48a" exitCode=0 Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.557111 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbnr" event={"ID":"b51c0214-d377-46b7-a95f-5d7eabba2646","Type":"ContainerDied","Data":"0022d0e32409c8dede3677ce67e0dbdb977bff75f2c90841c2073c73bd4fc48a"} Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.694256 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.834590 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.835097 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.835847 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-utilities\") pod \"b51c0214-d377-46b7-a95f-5d7eabba2646\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.835905 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-catalog-content\") pod \"b51c0214-d377-46b7-a95f-5d7eabba2646\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.836022 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbzcw\" (UniqueName: \"kubernetes.io/projected/b51c0214-d377-46b7-a95f-5d7eabba2646-kube-api-access-tbzcw\") pod \"b51c0214-d377-46b7-a95f-5d7eabba2646\" (UID: \"b51c0214-d377-46b7-a95f-5d7eabba2646\") " Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.837159 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-utilities" (OuterVolumeSpecName: "utilities") pod "b51c0214-d377-46b7-a95f-5d7eabba2646" (UID: 
"b51c0214-d377-46b7-a95f-5d7eabba2646"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.848183 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b51c0214-d377-46b7-a95f-5d7eabba2646-kube-api-access-tbzcw" (OuterVolumeSpecName: "kube-api-access-tbzcw") pod "b51c0214-d377-46b7-a95f-5d7eabba2646" (UID: "b51c0214-d377-46b7-a95f-5d7eabba2646"). InnerVolumeSpecName "kube-api-access-tbzcw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.898952 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b51c0214-d377-46b7-a95f-5d7eabba2646" (UID: "b51c0214-d377-46b7-a95f-5d7eabba2646"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.911371 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.940684 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbzcw\" (UniqueName: \"kubernetes.io/projected/b51c0214-d377-46b7-a95f-5d7eabba2646-kube-api-access-tbzcw\") on node \"crc\" DevicePath \"\"" Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.940745 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:03:59 crc kubenswrapper[4691]: I1124 09:03:59.942097 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b51c0214-d377-46b7-a95f-5d7eabba2646-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:04:00 crc kubenswrapper[4691]: I1124 09:04:00.570450 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qnbnr" event={"ID":"b51c0214-d377-46b7-a95f-5d7eabba2646","Type":"ContainerDied","Data":"10147ec22464838e2aed35f9bb1ae429d3ca99fbb8ad37631934f1fc59019c4b"} Nov 24 09:04:00 crc kubenswrapper[4691]: I1124 09:04:00.570522 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qnbnr" Nov 24 09:04:00 crc kubenswrapper[4691]: I1124 09:04:00.570568 4691 scope.go:117] "RemoveContainer" containerID="0022d0e32409c8dede3677ce67e0dbdb977bff75f2c90841c2073c73bd4fc48a" Nov 24 09:04:00 crc kubenswrapper[4691]: I1124 09:04:00.614854 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qnbnr"] Nov 24 09:04:00 crc kubenswrapper[4691]: I1124 09:04:00.617937 4691 scope.go:117] "RemoveContainer" containerID="9492e0c86ade00d513c1bfd85f0ead423588414b52570e337adc75bf448a6cd8" Nov 24 09:04:00 crc kubenswrapper[4691]: I1124 09:04:00.624334 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qnbnr"] Nov 24 09:04:00 crc kubenswrapper[4691]: I1124 09:04:00.640989 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:04:00 crc kubenswrapper[4691]: I1124 09:04:00.644607 4691 scope.go:117] "RemoveContainer" containerID="301b43454952d929a081865e0911277815e3fc0e9404fae92897fa7dab44c3f7" Nov 24 09:04:00 crc kubenswrapper[4691]: I1124 09:04:00.772822 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b51c0214-d377-46b7-a95f-5d7eabba2646" path="/var/lib/kubelet/pods/b51c0214-d377-46b7-a95f-5d7eabba2646/volumes" Nov 24 09:04:02 crc kubenswrapper[4691]: I1124 09:04:02.899889 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wwkgc"] Nov 24 09:04:02 crc kubenswrapper[4691]: I1124 09:04:02.900162 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wwkgc" podUID="602beef7-9916-4f88-ba4f-9d67122b91e8" containerName="registry-server" containerID="cri-o://761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065" gracePeriod=2 Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.403277 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.508489 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpsf8\" (UniqueName: \"kubernetes.io/projected/602beef7-9916-4f88-ba4f-9d67122b91e8-kube-api-access-tpsf8\") pod \"602beef7-9916-4f88-ba4f-9d67122b91e8\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") " Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.508548 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-utilities\") pod \"602beef7-9916-4f88-ba4f-9d67122b91e8\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") " Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.508577 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-catalog-content\") pod \"602beef7-9916-4f88-ba4f-9d67122b91e8\" (UID: \"602beef7-9916-4f88-ba4f-9d67122b91e8\") " Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.509617 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-utilities" (OuterVolumeSpecName: "utilities") pod "602beef7-9916-4f88-ba4f-9d67122b91e8" (UID: "602beef7-9916-4f88-ba4f-9d67122b91e8"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.516723 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/602beef7-9916-4f88-ba4f-9d67122b91e8-kube-api-access-tpsf8" (OuterVolumeSpecName: "kube-api-access-tpsf8") pod "602beef7-9916-4f88-ba4f-9d67122b91e8" (UID: "602beef7-9916-4f88-ba4f-9d67122b91e8"). InnerVolumeSpecName "kube-api-access-tpsf8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.532893 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "602beef7-9916-4f88-ba4f-9d67122b91e8" (UID: "602beef7-9916-4f88-ba4f-9d67122b91e8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.602725 4691 generic.go:334] "Generic (PLEG): container finished" podID="602beef7-9916-4f88-ba4f-9d67122b91e8" containerID="761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065" exitCode=0 Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.602807 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wwkgc" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.602828 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwkgc" event={"ID":"602beef7-9916-4f88-ba4f-9d67122b91e8","Type":"ContainerDied","Data":"761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065"} Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.603159 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwkgc" event={"ID":"602beef7-9916-4f88-ba4f-9d67122b91e8","Type":"ContainerDied","Data":"729521f763d5997d810791e9836d52c1cbd090425d98f8532deca7ae232d7398"} Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.603227 4691 scope.go:117] "RemoveContainer" containerID="761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.610658 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpsf8\" (UniqueName: \"kubernetes.io/projected/602beef7-9916-4f88-ba4f-9d67122b91e8-kube-api-access-tpsf8\") on node \"crc\" DevicePath \"\"" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.610691 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.610701 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/602beef7-9916-4f88-ba4f-9d67122b91e8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.625123 4691 scope.go:117] "RemoveContainer" containerID="da217be8ccd890355f50b8629e4982be7345293f40f204410989ceef2a3fa1da" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.641741 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wwkgc"] Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.650401 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-marketplace/redhat-marketplace-wwkgc"] Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.661019 4691 scope.go:117] "RemoveContainer" containerID="c77925fd20ec32d0c1f02196f805282dbd143962d30d801cb89cb9276c03e851" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.699300 4691 scope.go:117] "RemoveContainer" containerID="761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065" Nov 24 09:04:03 crc kubenswrapper[4691]: E1124 09:04:03.699757 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065\": container with ID starting with 761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065 not found: ID does not exist" containerID="761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.699794 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065"} err="failed to get container status \"761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065\": rpc error: code = NotFound desc = could not find container \"761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065\": container with ID starting with 761206ffcae7f8a089fe63065e8f65e57553462eb8f15a68af5304f3a2100065 not found: ID does not exist" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.699821 4691 scope.go:117] "RemoveContainer" containerID="da217be8ccd890355f50b8629e4982be7345293f40f204410989ceef2a3fa1da" Nov 24 09:04:03 crc kubenswrapper[4691]: E1124 09:04:03.700196 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da217be8ccd890355f50b8629e4982be7345293f40f204410989ceef2a3fa1da\": container with ID starting with da217be8ccd890355f50b8629e4982be7345293f40f204410989ceef2a3fa1da not found: ID does not exist" containerID="da217be8ccd890355f50b8629e4982be7345293f40f204410989ceef2a3fa1da" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.700294 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da217be8ccd890355f50b8629e4982be7345293f40f204410989ceef2a3fa1da"} err="failed to get container status \"da217be8ccd890355f50b8629e4982be7345293f40f204410989ceef2a3fa1da\": rpc error: code = NotFound desc = could not find container \"da217be8ccd890355f50b8629e4982be7345293f40f204410989ceef2a3fa1da\": container with ID starting with da217be8ccd890355f50b8629e4982be7345293f40f204410989ceef2a3fa1da not found: ID does not exist" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.700372 4691 scope.go:117] "RemoveContainer" containerID="c77925fd20ec32d0c1f02196f805282dbd143962d30d801cb89cb9276c03e851" Nov 24 09:04:03 crc kubenswrapper[4691]: E1124 09:04:03.700873 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c77925fd20ec32d0c1f02196f805282dbd143962d30d801cb89cb9276c03e851\": container with ID starting with c77925fd20ec32d0c1f02196f805282dbd143962d30d801cb89cb9276c03e851 not found: ID does not exist" containerID="c77925fd20ec32d0c1f02196f805282dbd143962d30d801cb89cb9276c03e851" Nov 24 09:04:03 crc kubenswrapper[4691]: I1124 09:04:03.700907 4691 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"c77925fd20ec32d0c1f02196f805282dbd143962d30d801cb89cb9276c03e851"} err="failed to get container status \"c77925fd20ec32d0c1f02196f805282dbd143962d30d801cb89cb9276c03e851\": rpc error: code = NotFound desc = could not find container \"c77925fd20ec32d0c1f02196f805282dbd143962d30d801cb89cb9276c03e851\": container with ID starting with c77925fd20ec32d0c1f02196f805282dbd143962d30d801cb89cb9276c03e851 not found: ID does not exist" Nov 24 09:04:04 crc kubenswrapper[4691]: I1124 09:04:04.772252 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="602beef7-9916-4f88-ba4f-9d67122b91e8" path="/var/lib/kubelet/pods/602beef7-9916-4f88-ba4f-9d67122b91e8/volumes" Nov 24 09:04:51 crc kubenswrapper[4691]: I1124 09:04:51.089045 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:04:51 crc kubenswrapper[4691]: I1124 09:04:51.089710 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:05:21 crc kubenswrapper[4691]: I1124 09:05:21.090377 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:05:21 crc kubenswrapper[4691]: I1124 09:05:21.091344 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:05:51 crc kubenswrapper[4691]: I1124 09:05:51.089807 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:05:51 crc kubenswrapper[4691]: I1124 09:05:51.090805 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:05:51 crc kubenswrapper[4691]: I1124 09:05:51.090891 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 09:05:51 crc kubenswrapper[4691]: I1124 09:05:51.092127 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c89a895b1442fb66dae9bce4acd58e58a614f26478fea75325d9956aadd18bb2"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon 
Nov 24 09:05:51 crc kubenswrapper[4691]: I1124 09:05:51.092209 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://c89a895b1442fb66dae9bce4acd58e58a614f26478fea75325d9956aadd18bb2" gracePeriod=600 Nov 24 09:05:51 crc kubenswrapper[4691]: I1124 09:05:51.691912 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="c89a895b1442fb66dae9bce4acd58e58a614f26478fea75325d9956aadd18bb2" exitCode=0 Nov 24 09:05:51 crc kubenswrapper[4691]: I1124 09:05:51.691948 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"c89a895b1442fb66dae9bce4acd58e58a614f26478fea75325d9956aadd18bb2"} Nov 24 09:05:51 crc kubenswrapper[4691]: I1124 09:05:51.692010 4691 scope.go:117] "RemoveContainer" containerID="3d6ce131b8ac26b1cb7a5759952caf13c375831dd0462ac48f97930734760a1f" Nov 24 09:05:52 crc kubenswrapper[4691]: I1124 09:05:52.706123 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f"} Nov 24 09:08:21 crc kubenswrapper[4691]: I1124 09:08:21.089164 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:08:21 crc kubenswrapper[4691]: I1124 09:08:21.090021 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:08:51 crc kubenswrapper[4691]: I1124 09:08:51.089061 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:08:51 crc kubenswrapper[4691]: I1124 09:08:51.089665 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:09:21 crc kubenswrapper[4691]: I1124 09:09:21.088972 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:09:21 crc kubenswrapper[4691]: I1124 09:09:21.089540 4691 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:09:21 crc kubenswrapper[4691]: I1124 09:09:21.089592 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 09:09:21 crc kubenswrapper[4691]: I1124 09:09:21.090355 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 09:09:21 crc kubenswrapper[4691]: I1124 09:09:21.090412 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" gracePeriod=600 Nov 24 09:09:21 crc kubenswrapper[4691]: E1124 09:09:21.204067 4691 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54ccc455_9127_4afd_b3a4_7fc35181bf93.slice/crio-conmon-abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f.scope\": RecentStats: unable to find data in memory cache]" Nov 24 09:09:21 crc kubenswrapper[4691]: E1124 09:09:21.236995 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:09:21 crc kubenswrapper[4691]: I1124 09:09:21.788762 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" exitCode=0 Nov 24 09:09:21 crc kubenswrapper[4691]: I1124 09:09:21.788927 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f"} Nov 24 09:09:21 crc kubenswrapper[4691]: I1124 09:09:21.789184 4691 scope.go:117] "RemoveContainer" containerID="c89a895b1442fb66dae9bce4acd58e58a614f26478fea75325d9956aadd18bb2" Nov 24 09:09:21 crc kubenswrapper[4691]: I1124 09:09:21.790151 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:09:21 crc kubenswrapper[4691]: E1124 09:09:21.790550 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:09:24 crc kubenswrapper[4691]: I1124 09:09:24.709597 4691 scope.go:117] "RemoveContainer" containerID="53c2951efff93b253f714417dfcfa9eec3f2b6d373592141542f83b8f20ddff6" Nov 24 09:09:24 crc kubenswrapper[4691]: I1124 09:09:24.733197 4691 scope.go:117] "RemoveContainer" containerID="6cb3d1859d0dc70766ffcefa38fe633c7c8de5851d1cf0860cf198ce46dd0705" Nov 24 09:09:24 crc kubenswrapper[4691]: I1124 09:09:24.771091 4691 scope.go:117] "RemoveContainer" containerID="c8c471dd4e190d29f49df9f9a227a29f0e30fa61f5cc9a3f9cf77e61e64192ab" Nov 24 09:09:34 crc kubenswrapper[4691]: I1124 09:09:34.761099 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:09:34 crc kubenswrapper[4691]: E1124 09:09:34.761942 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:09:48 crc kubenswrapper[4691]: I1124 09:09:48.774789 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:09:48 crc kubenswrapper[4691]: E1124 09:09:48.775944 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:09:59 crc kubenswrapper[4691]: I1124 09:09:59.760691 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:09:59 crc kubenswrapper[4691]: E1124 09:09:59.761837 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:10:13 crc kubenswrapper[4691]: I1124 09:10:13.761007 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:10:13 crc kubenswrapper[4691]: E1124 09:10:13.761759 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:10:26 crc kubenswrapper[4691]: I1124 09:10:26.761224 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:10:26 crc 
kubenswrapper[4691]: E1124 09:10:26.762128 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:10:37 crc kubenswrapper[4691]: I1124 09:10:37.760878 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:10:37 crc kubenswrapper[4691]: E1124 09:10:37.761485 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:10:52 crc kubenswrapper[4691]: I1124 09:10:52.760889 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:10:52 crc kubenswrapper[4691]: E1124 09:10:52.761612 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:11:05 crc kubenswrapper[4691]: I1124 09:11:05.760870 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:11:05 crc kubenswrapper[4691]: E1124 09:11:05.761759 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:11:17 crc kubenswrapper[4691]: I1124 09:11:17.761429 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:11:17 crc kubenswrapper[4691]: E1124 09:11:17.763147 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:11:30 crc kubenswrapper[4691]: I1124 09:11:30.760197 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:11:30 crc kubenswrapper[4691]: E1124 09:11:30.760980 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
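[Annotation] Once the container keeps failing its liveness probe, restarts are rate-limited: each pod sync above is rejected with CrashLoopBackOff until the back-off window expires. Kubelet's container restart back-off starts at 10s and doubles per restart, capped at 5m, which is why after enough crashes the error text is pinned at "back-off 5m0s". A sketch of that schedule (the constants mirror kubelet's documented defaults; the function is illustrative, not kubelet's implementation):

```go
// Sketch of the restart back-off behind the repeated "back-off 5m0s" errors
// above: the delay doubles after each crash until it reaches the cap.
package main

import (
	"fmt"
	"time"
)

func restartDelay(restarts int) time.Duration {
	const (
		initial  = 10 * time.Second // kubelet's documented initial back-off
		maxDelay = 5 * time.Minute  // documented cap, matching "back-off 5m0s"
	)
	d := initial
	for i := 0; i < restarts; i++ {
		d *= 2
		if d > maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for r := 0; r <= 6; r++ {
		fmt.Printf("after %d restarts: wait %v\n", r, restartDelay(r))
	}
	// The delay reaches 5m0s after five restarts; from then on every sync
	// attempt inside the window is rejected with "back-off 5m0s restarting
	// failed container", which is the cadence of the errors logged above.
}
```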
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.346297 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pt2cl"] Nov 24 09:11:35 crc kubenswrapper[4691]: E1124 09:11:35.347393 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="602beef7-9916-4f88-ba4f-9d67122b91e8" containerName="registry-server" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.347414 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="602beef7-9916-4f88-ba4f-9d67122b91e8" containerName="registry-server" Nov 24 09:11:35 crc kubenswrapper[4691]: E1124 09:11:35.347442 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="602beef7-9916-4f88-ba4f-9d67122b91e8" containerName="extract-utilities" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.347491 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="602beef7-9916-4f88-ba4f-9d67122b91e8" containerName="extract-utilities" Nov 24 09:11:35 crc kubenswrapper[4691]: E1124 09:11:35.347515 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b51c0214-d377-46b7-a95f-5d7eabba2646" containerName="extract-utilities" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.347527 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="b51c0214-d377-46b7-a95f-5d7eabba2646" containerName="extract-utilities" Nov 24 09:11:35 crc kubenswrapper[4691]: E1124 09:11:35.347547 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="602beef7-9916-4f88-ba4f-9d67122b91e8" containerName="extract-content" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.347557 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="602beef7-9916-4f88-ba4f-9d67122b91e8" containerName="extract-content" Nov 24 09:11:35 crc kubenswrapper[4691]: E1124 09:11:35.347580 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b51c0214-d377-46b7-a95f-5d7eabba2646" containerName="registry-server" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.347589 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="b51c0214-d377-46b7-a95f-5d7eabba2646" containerName="registry-server" Nov 24 09:11:35 crc kubenswrapper[4691]: E1124 09:11:35.347607 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b51c0214-d377-46b7-a95f-5d7eabba2646" containerName="extract-content" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.347630 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="b51c0214-d377-46b7-a95f-5d7eabba2646" containerName="extract-content" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.347881 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="b51c0214-d377-46b7-a95f-5d7eabba2646" containerName="registry-server" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.347905 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="602beef7-9916-4f88-ba4f-9d67122b91e8" containerName="registry-server" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.350217 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.386963 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pt2cl"] Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.456375 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vmtk\" (UniqueName: \"kubernetes.io/projected/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-kube-api-access-8vmtk\") pod \"redhat-operators-pt2cl\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.456448 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-catalog-content\") pod \"redhat-operators-pt2cl\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.456545 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-utilities\") pod \"redhat-operators-pt2cl\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.558566 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-utilities\") pod \"redhat-operators-pt2cl\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.558699 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vmtk\" (UniqueName: \"kubernetes.io/projected/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-kube-api-access-8vmtk\") pod \"redhat-operators-pt2cl\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.558755 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-catalog-content\") pod \"redhat-operators-pt2cl\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.559403 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-catalog-content\") pod \"redhat-operators-pt2cl\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.561823 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-utilities\") pod \"redhat-operators-pt2cl\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.587988 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-8vmtk\" (UniqueName: \"kubernetes.io/projected/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-kube-api-access-8vmtk\") pod \"redhat-operators-pt2cl\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:35 crc kubenswrapper[4691]: I1124 09:11:35.696917 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:36 crc kubenswrapper[4691]: I1124 09:11:36.220565 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pt2cl"] Nov 24 09:11:36 crc kubenswrapper[4691]: W1124 09:11:36.227116 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5e922ac_b170_4ac8_bd35_2ba8ff78b3ef.slice/crio-a47c1281520876b2264656eb56cebc52db808421229ceac04ec42dd753a456a8 WatchSource:0}: Error finding container a47c1281520876b2264656eb56cebc52db808421229ceac04ec42dd753a456a8: Status 404 returned error can't find the container with id a47c1281520876b2264656eb56cebc52db808421229ceac04ec42dd753a456a8 Nov 24 09:11:37 crc kubenswrapper[4691]: I1124 09:11:37.032359 4691 generic.go:334] "Generic (PLEG): container finished" podID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerID="2e21426469d7a885191e7824575610b107d66c091fa8ef7ed9fc8f9046bf40ff" exitCode=0 Nov 24 09:11:37 crc kubenswrapper[4691]: I1124 09:11:37.032458 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pt2cl" event={"ID":"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef","Type":"ContainerDied","Data":"2e21426469d7a885191e7824575610b107d66c091fa8ef7ed9fc8f9046bf40ff"} Nov 24 09:11:37 crc kubenswrapper[4691]: I1124 09:11:37.034095 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pt2cl" event={"ID":"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef","Type":"ContainerStarted","Data":"a47c1281520876b2264656eb56cebc52db808421229ceac04ec42dd753a456a8"} Nov 24 09:11:37 crc kubenswrapper[4691]: I1124 09:11:37.038706 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 09:11:39 crc kubenswrapper[4691]: I1124 09:11:39.057161 4691 generic.go:334] "Generic (PLEG): container finished" podID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerID="f076789b46a5f1f237dc7771af88986471faed0ba440cef9346cd05c0148da81" exitCode=0 Nov 24 09:11:39 crc kubenswrapper[4691]: I1124 09:11:39.057494 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pt2cl" event={"ID":"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef","Type":"ContainerDied","Data":"f076789b46a5f1f237dc7771af88986471faed0ba440cef9346cd05c0148da81"} Nov 24 09:11:41 crc kubenswrapper[4691]: I1124 09:11:41.088344 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pt2cl" event={"ID":"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef","Type":"ContainerStarted","Data":"00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede"} Nov 24 09:11:41 crc kubenswrapper[4691]: I1124 09:11:41.115288 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pt2cl" podStartSLOduration=3.672397479 podStartE2EDuration="6.115269311s" podCreationTimestamp="2025-11-24 09:11:35 +0000 UTC" firstStartedPulling="2025-11-24 09:11:37.038468226 +0000 UTC m=+4459.037417475" lastFinishedPulling="2025-11-24 09:11:39.481340068 
+0000 UTC m=+4461.480289307" observedRunningTime="2025-11-24 09:11:41.111866024 +0000 UTC m=+4463.110815283" watchObservedRunningTime="2025-11-24 09:11:41.115269311 +0000 UTC m=+4463.114218570" Nov 24 09:11:44 crc kubenswrapper[4691]: I1124 09:11:44.760919 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:11:44 crc kubenswrapper[4691]: E1124 09:11:44.761814 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:11:45 crc kubenswrapper[4691]: I1124 09:11:45.697612 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:45 crc kubenswrapper[4691]: I1124 09:11:45.697686 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:46 crc kubenswrapper[4691]: I1124 09:11:46.756153 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pt2cl" podUID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerName="registry-server" probeResult="failure" output=< Nov 24 09:11:46 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 09:11:46 crc kubenswrapper[4691]: > Nov 24 09:11:55 crc kubenswrapper[4691]: I1124 09:11:55.767152 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:55 crc kubenswrapper[4691]: I1124 09:11:55.820874 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:56 crc kubenswrapper[4691]: I1124 09:11:56.002103 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pt2cl"] Nov 24 09:11:56 crc kubenswrapper[4691]: I1124 09:11:56.760806 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:11:56 crc kubenswrapper[4691]: E1124 09:11:56.761257 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:11:57 crc kubenswrapper[4691]: I1124 09:11:57.249692 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pt2cl" podUID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerName="registry-server" containerID="cri-o://00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede" gracePeriod=2 Nov 24 09:11:57 crc kubenswrapper[4691]: I1124 09:11:57.739578 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:57 crc kubenswrapper[4691]: I1124 09:11:57.891300 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-catalog-content\") pod \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " Nov 24 09:11:57 crc kubenswrapper[4691]: I1124 09:11:57.891703 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vmtk\" (UniqueName: \"kubernetes.io/projected/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-kube-api-access-8vmtk\") pod \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " Nov 24 09:11:57 crc kubenswrapper[4691]: I1124 09:11:57.891738 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-utilities\") pod \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\" (UID: \"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef\") " Nov 24 09:11:57 crc kubenswrapper[4691]: I1124 09:11:57.893698 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-utilities" (OuterVolumeSpecName: "utilities") pod "f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" (UID: "f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:11:57 crc kubenswrapper[4691]: I1124 09:11:57.896884 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-kube-api-access-8vmtk" (OuterVolumeSpecName: "kube-api-access-8vmtk") pod "f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" (UID: "f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef"). InnerVolumeSpecName "kube-api-access-8vmtk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:11:57 crc kubenswrapper[4691]: I1124 09:11:57.990949 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" (UID: "f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:11:57 crc kubenswrapper[4691]: I1124 09:11:57.994703 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:11:57 crc kubenswrapper[4691]: I1124 09:11:57.994957 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vmtk\" (UniqueName: \"kubernetes.io/projected/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-kube-api-access-8vmtk\") on node \"crc\" DevicePath \"\"" Nov 24 09:11:57 crc kubenswrapper[4691]: I1124 09:11:57.995033 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.261619 4691 generic.go:334] "Generic (PLEG): container finished" podID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerID="00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede" exitCode=0 Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.261666 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pt2cl" event={"ID":"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef","Type":"ContainerDied","Data":"00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede"} Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.261696 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pt2cl" event={"ID":"f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef","Type":"ContainerDied","Data":"a47c1281520876b2264656eb56cebc52db808421229ceac04ec42dd753a456a8"} Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.261715 4691 scope.go:117] "RemoveContainer" containerID="00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede" Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.261719 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pt2cl" Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.284941 4691 scope.go:117] "RemoveContainer" containerID="f076789b46a5f1f237dc7771af88986471faed0ba440cef9346cd05c0148da81" Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.298807 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pt2cl"] Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.306312 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pt2cl"] Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.323166 4691 scope.go:117] "RemoveContainer" containerID="2e21426469d7a885191e7824575610b107d66c091fa8ef7ed9fc8f9046bf40ff" Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.354372 4691 scope.go:117] "RemoveContainer" containerID="00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede" Nov 24 09:11:58 crc kubenswrapper[4691]: E1124 09:11:58.355370 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede\": container with ID starting with 00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede not found: ID does not exist" containerID="00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede" Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.355411 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede"} err="failed to get container status \"00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede\": rpc error: code = NotFound desc = could not find container \"00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede\": container with ID starting with 00b10c01caccd30a22ae192f787e715d434eadc997639a58231d5973173b1ede not found: ID does not exist" Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.355437 4691 scope.go:117] "RemoveContainer" containerID="f076789b46a5f1f237dc7771af88986471faed0ba440cef9346cd05c0148da81" Nov 24 09:11:58 crc kubenswrapper[4691]: E1124 09:11:58.355946 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f076789b46a5f1f237dc7771af88986471faed0ba440cef9346cd05c0148da81\": container with ID starting with f076789b46a5f1f237dc7771af88986471faed0ba440cef9346cd05c0148da81 not found: ID does not exist" containerID="f076789b46a5f1f237dc7771af88986471faed0ba440cef9346cd05c0148da81" Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.356003 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f076789b46a5f1f237dc7771af88986471faed0ba440cef9346cd05c0148da81"} err="failed to get container status \"f076789b46a5f1f237dc7771af88986471faed0ba440cef9346cd05c0148da81\": rpc error: code = NotFound desc = could not find container \"f076789b46a5f1f237dc7771af88986471faed0ba440cef9346cd05c0148da81\": container with ID starting with f076789b46a5f1f237dc7771af88986471faed0ba440cef9346cd05c0148da81 not found: ID does not exist" Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.356038 4691 scope.go:117] "RemoveContainer" containerID="2e21426469d7a885191e7824575610b107d66c091fa8ef7ed9fc8f9046bf40ff" Nov 24 09:11:58 crc kubenswrapper[4691]: E1124 09:11:58.356428 4691 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"2e21426469d7a885191e7824575610b107d66c091fa8ef7ed9fc8f9046bf40ff\": container with ID starting with 2e21426469d7a885191e7824575610b107d66c091fa8ef7ed9fc8f9046bf40ff not found: ID does not exist" containerID="2e21426469d7a885191e7824575610b107d66c091fa8ef7ed9fc8f9046bf40ff" Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.356499 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e21426469d7a885191e7824575610b107d66c091fa8ef7ed9fc8f9046bf40ff"} err="failed to get container status \"2e21426469d7a885191e7824575610b107d66c091fa8ef7ed9fc8f9046bf40ff\": rpc error: code = NotFound desc = could not find container \"2e21426469d7a885191e7824575610b107d66c091fa8ef7ed9fc8f9046bf40ff\": container with ID starting with 2e21426469d7a885191e7824575610b107d66c091fa8ef7ed9fc8f9046bf40ff not found: ID does not exist" Nov 24 09:11:58 crc kubenswrapper[4691]: I1124 09:11:58.802256 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" path="/var/lib/kubelet/pods/f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef/volumes" Nov 24 09:12:09 crc kubenswrapper[4691]: I1124 09:12:09.760824 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:12:09 crc kubenswrapper[4691]: E1124 09:12:09.761720 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:12:20 crc kubenswrapper[4691]: I1124 09:12:20.761143 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:12:20 crc kubenswrapper[4691]: E1124 09:12:20.763112 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:12:34 crc kubenswrapper[4691]: I1124 09:12:34.761503 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:12:34 crc kubenswrapper[4691]: E1124 09:12:34.763475 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:12:47 crc kubenswrapper[4691]: I1124 09:12:47.760989 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:12:47 crc kubenswrapper[4691]: E1124 09:12:47.761829 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:13:00 crc kubenswrapper[4691]: I1124 09:13:00.761257 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:13:00 crc kubenswrapper[4691]: E1124 09:13:00.762601 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:13:13 crc kubenswrapper[4691]: I1124 09:13:13.761168 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:13:13 crc kubenswrapper[4691]: E1124 09:13:13.762075 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.566206 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dwqj5"] Nov 24 09:13:19 crc kubenswrapper[4691]: E1124 09:13:19.567039 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerName="extract-content" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.567052 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerName="extract-content" Nov 24 09:13:19 crc kubenswrapper[4691]: E1124 09:13:19.567083 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerName="registry-server" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.567088 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerName="registry-server" Nov 24 09:13:19 crc kubenswrapper[4691]: E1124 09:13:19.567104 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerName="extract-utilities" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.567110 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerName="extract-utilities" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.567311 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5e922ac-b170-4ac8-bd35-2ba8ff78b3ef" containerName="registry-server" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.568573 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.581956 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dwqj5"] Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.654653 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5jjm\" (UniqueName: \"kubernetes.io/projected/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-kube-api-access-l5jjm\") pod \"community-operators-dwqj5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.654940 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-utilities\") pod \"community-operators-dwqj5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.655108 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-catalog-content\") pod \"community-operators-dwqj5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.757113 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5jjm\" (UniqueName: \"kubernetes.io/projected/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-kube-api-access-l5jjm\") pod \"community-operators-dwqj5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.757253 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-utilities\") pod \"community-operators-dwqj5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.757312 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-catalog-content\") pod \"community-operators-dwqj5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.757920 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-utilities\") pod \"community-operators-dwqj5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.757930 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-catalog-content\") pod \"community-operators-dwqj5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.785618 4691 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-l5jjm\" (UniqueName: \"kubernetes.io/projected/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-kube-api-access-l5jjm\") pod \"community-operators-dwqj5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:19 crc kubenswrapper[4691]: I1124 09:13:19.938640 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:20 crc kubenswrapper[4691]: I1124 09:13:20.478321 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dwqj5"] Nov 24 09:13:20 crc kubenswrapper[4691]: I1124 09:13:20.996305 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwqj5" event={"ID":"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5","Type":"ContainerStarted","Data":"d920d0ffbae0a400cde2b40fa9968874a7fed1aa0440dd9c727ebfccc3940518"} Nov 24 09:13:22 crc kubenswrapper[4691]: I1124 09:13:22.005008 4691 generic.go:334] "Generic (PLEG): container finished" podID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" containerID="9bd82e7e2bc35a866b6649f02f744dc26a564e1323e2b2a7cc2af77f2b1a8a5a" exitCode=0 Nov 24 09:13:22 crc kubenswrapper[4691]: I1124 09:13:22.005087 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwqj5" event={"ID":"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5","Type":"ContainerDied","Data":"9bd82e7e2bc35a866b6649f02f744dc26a564e1323e2b2a7cc2af77f2b1a8a5a"} Nov 24 09:13:23 crc kubenswrapper[4691]: I1124 09:13:23.017912 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwqj5" event={"ID":"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5","Type":"ContainerStarted","Data":"f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36"} Nov 24 09:13:24 crc kubenswrapper[4691]: I1124 09:13:24.026974 4691 generic.go:334] "Generic (PLEG): container finished" podID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" containerID="f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36" exitCode=0 Nov 24 09:13:24 crc kubenswrapper[4691]: I1124 09:13:24.027083 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwqj5" event={"ID":"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5","Type":"ContainerDied","Data":"f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36"} Nov 24 09:13:25 crc kubenswrapper[4691]: I1124 09:13:25.037929 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwqj5" event={"ID":"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5","Type":"ContainerStarted","Data":"5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348"} Nov 24 09:13:25 crc kubenswrapper[4691]: I1124 09:13:25.062314 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dwqj5" podStartSLOduration=3.40044867 podStartE2EDuration="6.062298741s" podCreationTimestamp="2025-11-24 09:13:19 +0000 UTC" firstStartedPulling="2025-11-24 09:13:22.007182085 +0000 UTC m=+4564.006131334" lastFinishedPulling="2025-11-24 09:13:24.669032146 +0000 UTC m=+4566.667981405" observedRunningTime="2025-11-24 09:13:25.055849267 +0000 UTC m=+4567.054798516" watchObservedRunningTime="2025-11-24 09:13:25.062298741 +0000 UTC m=+4567.061247990" Nov 24 09:13:25 crc kubenswrapper[4691]: I1124 09:13:25.760911 4691 scope.go:117] "RemoveContainer" 
containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:13:25 crc kubenswrapper[4691]: E1124 09:13:25.761237 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:13:29 crc kubenswrapper[4691]: I1124 09:13:29.939545 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:29 crc kubenswrapper[4691]: I1124 09:13:29.940649 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:29 crc kubenswrapper[4691]: I1124 09:13:29.998718 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:30 crc kubenswrapper[4691]: I1124 09:13:30.128732 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:30 crc kubenswrapper[4691]: I1124 09:13:30.237069 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dwqj5"] Nov 24 09:13:32 crc kubenswrapper[4691]: I1124 09:13:32.098890 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dwqj5" podUID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" containerName="registry-server" containerID="cri-o://5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348" gracePeriod=2 Nov 24 09:13:32 crc kubenswrapper[4691]: I1124 09:13:32.607565 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:32 crc kubenswrapper[4691]: I1124 09:13:32.740798 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5jjm\" (UniqueName: \"kubernetes.io/projected/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-kube-api-access-l5jjm\") pod \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " Nov 24 09:13:32 crc kubenswrapper[4691]: I1124 09:13:32.740911 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-utilities\") pod \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " Nov 24 09:13:32 crc kubenswrapper[4691]: I1124 09:13:32.741047 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-catalog-content\") pod \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\" (UID: \"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5\") " Nov 24 09:13:32 crc kubenswrapper[4691]: I1124 09:13:32.741916 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-utilities" (OuterVolumeSpecName: "utilities") pod "e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" (UID: "e0d97742-7b12-49a6-9e0f-6c720bfbf4f5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:13:32 crc kubenswrapper[4691]: I1124 09:13:32.746018 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-kube-api-access-l5jjm" (OuterVolumeSpecName: "kube-api-access-l5jjm") pod "e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" (UID: "e0d97742-7b12-49a6-9e0f-6c720bfbf4f5"). InnerVolumeSpecName "kube-api-access-l5jjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:13:32 crc kubenswrapper[4691]: I1124 09:13:32.801235 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" (UID: "e0d97742-7b12-49a6-9e0f-6c720bfbf4f5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:13:32 crc kubenswrapper[4691]: I1124 09:13:32.842970 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:13:32 crc kubenswrapper[4691]: I1124 09:13:32.843018 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5jjm\" (UniqueName: \"kubernetes.io/projected/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-kube-api-access-l5jjm\") on node \"crc\" DevicePath \"\"" Nov 24 09:13:32 crc kubenswrapper[4691]: I1124 09:13:32.843031 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.111399 4691 generic.go:334] "Generic (PLEG): container finished" podID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" containerID="5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348" exitCode=0 Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.111441 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwqj5" event={"ID":"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5","Type":"ContainerDied","Data":"5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348"} Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.111509 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dwqj5" Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.111727 4691 scope.go:117] "RemoveContainer" containerID="5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348" Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.111713 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dwqj5" event={"ID":"e0d97742-7b12-49a6-9e0f-6c720bfbf4f5","Type":"ContainerDied","Data":"d920d0ffbae0a400cde2b40fa9968874a7fed1aa0440dd9c727ebfccc3940518"} Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.143904 4691 scope.go:117] "RemoveContainer" containerID="f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36" Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.162923 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dwqj5"] Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.176362 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dwqj5"] Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.378906 4691 scope.go:117] "RemoveContainer" containerID="9bd82e7e2bc35a866b6649f02f744dc26a564e1323e2b2a7cc2af77f2b1a8a5a" Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.419929 4691 scope.go:117] "RemoveContainer" containerID="5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348" Nov 24 09:13:33 crc kubenswrapper[4691]: E1124 09:13:33.420588 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348\": container with ID starting with 5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348 not found: ID does not exist" containerID="5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348" Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.420661 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348"} err="failed to get container status \"5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348\": rpc error: code = NotFound desc = could not find container \"5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348\": container with ID starting with 5562b33f8a0212a3b80370cd772f05165570b57fbfe5469e7baec6683b84c348 not found: ID does not exist" Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.420708 4691 scope.go:117] "RemoveContainer" containerID="f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36" Nov 24 09:13:33 crc kubenswrapper[4691]: E1124 09:13:33.421207 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36\": container with ID starting with f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36 not found: ID does not exist" containerID="f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36" Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.421235 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36"} err="failed to get container status \"f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36\": rpc error: code = NotFound desc = could not find 
container \"f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36\": container with ID starting with f3a020f0de667b1392a0c18546added697474bed3b1910fbbce75f7f21b91a36 not found: ID does not exist" Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.421251 4691 scope.go:117] "RemoveContainer" containerID="9bd82e7e2bc35a866b6649f02f744dc26a564e1323e2b2a7cc2af77f2b1a8a5a" Nov 24 09:13:33 crc kubenswrapper[4691]: E1124 09:13:33.421626 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bd82e7e2bc35a866b6649f02f744dc26a564e1323e2b2a7cc2af77f2b1a8a5a\": container with ID starting with 9bd82e7e2bc35a866b6649f02f744dc26a564e1323e2b2a7cc2af77f2b1a8a5a not found: ID does not exist" containerID="9bd82e7e2bc35a866b6649f02f744dc26a564e1323e2b2a7cc2af77f2b1a8a5a" Nov 24 09:13:33 crc kubenswrapper[4691]: I1124 09:13:33.421652 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bd82e7e2bc35a866b6649f02f744dc26a564e1323e2b2a7cc2af77f2b1a8a5a"} err="failed to get container status \"9bd82e7e2bc35a866b6649f02f744dc26a564e1323e2b2a7cc2af77f2b1a8a5a\": rpc error: code = NotFound desc = could not find container \"9bd82e7e2bc35a866b6649f02f744dc26a564e1323e2b2a7cc2af77f2b1a8a5a\": container with ID starting with 9bd82e7e2bc35a866b6649f02f744dc26a564e1323e2b2a7cc2af77f2b1a8a5a not found: ID does not exist" Nov 24 09:13:34 crc kubenswrapper[4691]: I1124 09:13:34.780753 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" path="/var/lib/kubelet/pods/e0d97742-7b12-49a6-9e0f-6c720bfbf4f5/volumes" Nov 24 09:13:38 crc kubenswrapper[4691]: I1124 09:13:38.767214 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:13:38 crc kubenswrapper[4691]: E1124 09:13:38.768016 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:13:49 crc kubenswrapper[4691]: I1124 09:13:49.764001 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:13:49 crc kubenswrapper[4691]: E1124 09:13:49.764844 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:14:04 crc kubenswrapper[4691]: I1124 09:14:04.761145 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:14:04 crc kubenswrapper[4691]: E1124 09:14:04.762022 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:14:15 crc kubenswrapper[4691]: I1124 09:14:15.760684 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:14:15 crc kubenswrapper[4691]: E1124 09:14:15.761465 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:14:28 crc kubenswrapper[4691]: I1124 09:14:28.769340 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:14:29 crc kubenswrapper[4691]: I1124 09:14:29.657967 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"c3bc47ddb7c02824987171379a4855ed137dcbbad67fcea843b9fbf709d1019a"} Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.294089 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77"] Nov 24 09:15:00 crc kubenswrapper[4691]: E1124 09:15:00.295066 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" containerName="registry-server" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.295080 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" containerName="registry-server" Nov 24 09:15:00 crc kubenswrapper[4691]: E1124 09:15:00.295094 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" containerName="extract-content" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.295100 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" containerName="extract-content" Nov 24 09:15:00 crc kubenswrapper[4691]: E1124 09:15:00.295122 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" containerName="extract-utilities" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.295129 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" containerName="extract-utilities" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.295331 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0d97742-7b12-49a6-9e0f-6c720bfbf4f5" containerName="registry-server" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.296042 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.302125 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.302334 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.304115 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77"] Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.392624 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d2c480a-674f-4f95-8d05-add0c6f9b919-config-volume\") pod \"collect-profiles-29399595-l5n77\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.392676 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dpsg\" (UniqueName: \"kubernetes.io/projected/4d2c480a-674f-4f95-8d05-add0c6f9b919-kube-api-access-6dpsg\") pod \"collect-profiles-29399595-l5n77\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.392881 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d2c480a-674f-4f95-8d05-add0c6f9b919-secret-volume\") pod \"collect-profiles-29399595-l5n77\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.494243 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dpsg\" (UniqueName: \"kubernetes.io/projected/4d2c480a-674f-4f95-8d05-add0c6f9b919-kube-api-access-6dpsg\") pod \"collect-profiles-29399595-l5n77\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.494338 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d2c480a-674f-4f95-8d05-add0c6f9b919-secret-volume\") pod \"collect-profiles-29399595-l5n77\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.494427 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d2c480a-674f-4f95-8d05-add0c6f9b919-config-volume\") pod \"collect-profiles-29399595-l5n77\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.495345 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d2c480a-674f-4f95-8d05-add0c6f9b919-config-volume\") pod 
\"collect-profiles-29399595-l5n77\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.508301 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d2c480a-674f-4f95-8d05-add0c6f9b919-secret-volume\") pod \"collect-profiles-29399595-l5n77\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.511060 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dpsg\" (UniqueName: \"kubernetes.io/projected/4d2c480a-674f-4f95-8d05-add0c6f9b919-kube-api-access-6dpsg\") pod \"collect-profiles-29399595-l5n77\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:00 crc kubenswrapper[4691]: I1124 09:15:00.616917 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:01 crc kubenswrapper[4691]: I1124 09:15:01.061662 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77"] Nov 24 09:15:01 crc kubenswrapper[4691]: I1124 09:15:01.988250 4691 generic.go:334] "Generic (PLEG): container finished" podID="4d2c480a-674f-4f95-8d05-add0c6f9b919" containerID="9f0f30235bc9d26e775bf72266fce5fc006f7cfc1a6f348b29082335dcb97f02" exitCode=0 Nov 24 09:15:01 crc kubenswrapper[4691]: I1124 09:15:01.988514 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" event={"ID":"4d2c480a-674f-4f95-8d05-add0c6f9b919","Type":"ContainerDied","Data":"9f0f30235bc9d26e775bf72266fce5fc006f7cfc1a6f348b29082335dcb97f02"} Nov 24 09:15:01 crc kubenswrapper[4691]: I1124 09:15:01.988547 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" event={"ID":"4d2c480a-674f-4f95-8d05-add0c6f9b919","Type":"ContainerStarted","Data":"14b7acc389fa0e7bc3370f0e51d0edbd13707c1747a7397f8b1398df8e6fe142"} Nov 24 09:15:03 crc kubenswrapper[4691]: I1124 09:15:03.338016 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:03 crc kubenswrapper[4691]: I1124 09:15:03.353034 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d2c480a-674f-4f95-8d05-add0c6f9b919-config-volume\") pod \"4d2c480a-674f-4f95-8d05-add0c6f9b919\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " Nov 24 09:15:03 crc kubenswrapper[4691]: I1124 09:15:03.353129 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dpsg\" (UniqueName: \"kubernetes.io/projected/4d2c480a-674f-4f95-8d05-add0c6f9b919-kube-api-access-6dpsg\") pod \"4d2c480a-674f-4f95-8d05-add0c6f9b919\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " Nov 24 09:15:03 crc kubenswrapper[4691]: I1124 09:15:03.353233 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d2c480a-674f-4f95-8d05-add0c6f9b919-secret-volume\") pod \"4d2c480a-674f-4f95-8d05-add0c6f9b919\" (UID: \"4d2c480a-674f-4f95-8d05-add0c6f9b919\") " Nov 24 09:15:03 crc kubenswrapper[4691]: I1124 09:15:03.353777 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d2c480a-674f-4f95-8d05-add0c6f9b919-config-volume" (OuterVolumeSpecName: "config-volume") pod "4d2c480a-674f-4f95-8d05-add0c6f9b919" (UID: "4d2c480a-674f-4f95-8d05-add0c6f9b919"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 09:15:03 crc kubenswrapper[4691]: I1124 09:15:03.360641 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d2c480a-674f-4f95-8d05-add0c6f9b919-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4d2c480a-674f-4f95-8d05-add0c6f9b919" (UID: "4d2c480a-674f-4f95-8d05-add0c6f9b919"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 09:15:03 crc kubenswrapper[4691]: I1124 09:15:03.361814 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d2c480a-674f-4f95-8d05-add0c6f9b919-kube-api-access-6dpsg" (OuterVolumeSpecName: "kube-api-access-6dpsg") pod "4d2c480a-674f-4f95-8d05-add0c6f9b919" (UID: "4d2c480a-674f-4f95-8d05-add0c6f9b919"). InnerVolumeSpecName "kube-api-access-6dpsg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:15:03 crc kubenswrapper[4691]: I1124 09:15:03.455736 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dpsg\" (UniqueName: \"kubernetes.io/projected/4d2c480a-674f-4f95-8d05-add0c6f9b919-kube-api-access-6dpsg\") on node \"crc\" DevicePath \"\"" Nov 24 09:15:03 crc kubenswrapper[4691]: I1124 09:15:03.455785 4691 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4d2c480a-674f-4f95-8d05-add0c6f9b919-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 09:15:03 crc kubenswrapper[4691]: I1124 09:15:03.455796 4691 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4d2c480a-674f-4f95-8d05-add0c6f9b919-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 09:15:04 crc kubenswrapper[4691]: I1124 09:15:04.008718 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" event={"ID":"4d2c480a-674f-4f95-8d05-add0c6f9b919","Type":"ContainerDied","Data":"14b7acc389fa0e7bc3370f0e51d0edbd13707c1747a7397f8b1398df8e6fe142"} Nov 24 09:15:04 crc kubenswrapper[4691]: I1124 09:15:04.008760 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14b7acc389fa0e7bc3370f0e51d0edbd13707c1747a7397f8b1398df8e6fe142" Nov 24 09:15:04 crc kubenswrapper[4691]: I1124 09:15:04.008797 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399595-l5n77" Nov 24 09:15:04 crc kubenswrapper[4691]: I1124 09:15:04.414932 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx"] Nov 24 09:15:04 crc kubenswrapper[4691]: I1124 09:15:04.429772 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399550-sh8qx"] Nov 24 09:15:04 crc kubenswrapper[4691]: I1124 09:15:04.774076 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8dac9fc-69b6-4899-a8fd-7aa75e002329" path="/var/lib/kubelet/pods/d8dac9fc-69b6-4899-a8fd-7aa75e002329/volumes" Nov 24 09:15:25 crc kubenswrapper[4691]: I1124 09:15:25.002426 4691 scope.go:117] "RemoveContainer" containerID="d561ae0646a0431ec713024573bcdd28dc487e226753c4ef6fe075f36b63b97e" Nov 24 09:16:51 crc kubenswrapper[4691]: I1124 09:16:51.089981 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:16:51 crc kubenswrapper[4691]: I1124 09:16:51.090699 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:17:21 crc kubenswrapper[4691]: I1124 09:17:21.090496 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Nov 24 09:17:21 crc kubenswrapper[4691]: I1124 09:17:21.091243 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:17:51 crc kubenswrapper[4691]: I1124 09:17:51.088965 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:17:51 crc kubenswrapper[4691]: I1124 09:17:51.089504 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:17:51 crc kubenswrapper[4691]: I1124 09:17:51.089548 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 09:17:51 crc kubenswrapper[4691]: I1124 09:17:51.090295 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c3bc47ddb7c02824987171379a4855ed137dcbbad67fcea843b9fbf709d1019a"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 09:17:51 crc kubenswrapper[4691]: I1124 09:17:51.090340 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://c3bc47ddb7c02824987171379a4855ed137dcbbad67fcea843b9fbf709d1019a" gracePeriod=600 Nov 24 09:17:51 crc kubenswrapper[4691]: I1124 09:17:51.651765 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="c3bc47ddb7c02824987171379a4855ed137dcbbad67fcea843b9fbf709d1019a" exitCode=0 Nov 24 09:17:51 crc kubenswrapper[4691]: I1124 09:17:51.651847 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"c3bc47ddb7c02824987171379a4855ed137dcbbad67fcea843b9fbf709d1019a"} Nov 24 09:17:51 crc kubenswrapper[4691]: I1124 09:17:51.652379 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26"} Nov 24 09:17:51 crc kubenswrapper[4691]: I1124 09:17:51.652403 4691 scope.go:117] "RemoveContainer" containerID="abdae46e1c23786722cc1d9ee7b656223655944b4200188cd4ea22940b9ff61f" Nov 24 09:19:34 crc kubenswrapper[4691]: I1124 09:19:34.865798 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2v4mj"] Nov 24 09:19:34 crc kubenswrapper[4691]: E1124 09:19:34.867233 4691 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="4d2c480a-674f-4f95-8d05-add0c6f9b919" containerName="collect-profiles" Nov 24 09:19:34 crc kubenswrapper[4691]: I1124 09:19:34.867252 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d2c480a-674f-4f95-8d05-add0c6f9b919" containerName="collect-profiles" Nov 24 09:19:34 crc kubenswrapper[4691]: I1124 09:19:34.867512 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d2c480a-674f-4f95-8d05-add0c6f9b919" containerName="collect-profiles" Nov 24 09:19:34 crc kubenswrapper[4691]: I1124 09:19:34.869849 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:34 crc kubenswrapper[4691]: I1124 09:19:34.886107 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2v4mj"] Nov 24 09:19:34 crc kubenswrapper[4691]: I1124 09:19:34.965505 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-utilities\") pod \"certified-operators-2v4mj\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:34 crc kubenswrapper[4691]: I1124 09:19:34.965550 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-catalog-content\") pod \"certified-operators-2v4mj\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:34 crc kubenswrapper[4691]: I1124 09:19:34.965720 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6lmz\" (UniqueName: \"kubernetes.io/projected/cef97f77-31fb-40ea-ba25-e88615fb4b75-kube-api-access-g6lmz\") pod \"certified-operators-2v4mj\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:35 crc kubenswrapper[4691]: I1124 09:19:35.067768 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6lmz\" (UniqueName: \"kubernetes.io/projected/cef97f77-31fb-40ea-ba25-e88615fb4b75-kube-api-access-g6lmz\") pod \"certified-operators-2v4mj\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:35 crc kubenswrapper[4691]: I1124 09:19:35.067921 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-utilities\") pod \"certified-operators-2v4mj\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:35 crc kubenswrapper[4691]: I1124 09:19:35.067949 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-catalog-content\") pod \"certified-operators-2v4mj\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:35 crc kubenswrapper[4691]: I1124 09:19:35.068680 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-utilities\") pod \"certified-operators-2v4mj\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:35 crc kubenswrapper[4691]: I1124 09:19:35.068715 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-catalog-content\") pod \"certified-operators-2v4mj\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:35 crc kubenswrapper[4691]: I1124 09:19:35.094333 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6lmz\" (UniqueName: \"kubernetes.io/projected/cef97f77-31fb-40ea-ba25-e88615fb4b75-kube-api-access-g6lmz\") pod \"certified-operators-2v4mj\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:35 crc kubenswrapper[4691]: I1124 09:19:35.241573 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:35 crc kubenswrapper[4691]: I1124 09:19:35.774134 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2v4mj"] Nov 24 09:19:36 crc kubenswrapper[4691]: I1124 09:19:36.690919 4691 generic.go:334] "Generic (PLEG): container finished" podID="cef97f77-31fb-40ea-ba25-e88615fb4b75" containerID="a2de2ceaab828bedd3c3ac86347e138aabfa62cf4a2ff756ebfa98f879e147bd" exitCode=0 Nov 24 09:19:36 crc kubenswrapper[4691]: I1124 09:19:36.690972 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2v4mj" event={"ID":"cef97f77-31fb-40ea-ba25-e88615fb4b75","Type":"ContainerDied","Data":"a2de2ceaab828bedd3c3ac86347e138aabfa62cf4a2ff756ebfa98f879e147bd"} Nov 24 09:19:36 crc kubenswrapper[4691]: I1124 09:19:36.691257 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2v4mj" event={"ID":"cef97f77-31fb-40ea-ba25-e88615fb4b75","Type":"ContainerStarted","Data":"9d9e72382a27e7a210e92f358dfc3e51ce1b8c0fdf793264787607d6a923d91c"} Nov 24 09:19:36 crc kubenswrapper[4691]: I1124 09:19:36.694929 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 09:19:38 crc kubenswrapper[4691]: I1124 09:19:38.723029 4691 generic.go:334] "Generic (PLEG): container finished" podID="cef97f77-31fb-40ea-ba25-e88615fb4b75" containerID="7ae2e941c8a7e76fa0bbff857b812f2396e069f459f25b38c7d51f56f0050523" exitCode=0 Nov 24 09:19:38 crc kubenswrapper[4691]: I1124 09:19:38.723093 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2v4mj" event={"ID":"cef97f77-31fb-40ea-ba25-e88615fb4b75","Type":"ContainerDied","Data":"7ae2e941c8a7e76fa0bbff857b812f2396e069f459f25b38c7d51f56f0050523"} Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.454664 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j8frn"] Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.458628 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.475026 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j8frn"] Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.569330 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-catalog-content\") pod \"redhat-marketplace-j8frn\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.569648 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l2ms\" (UniqueName: \"kubernetes.io/projected/2702915a-b038-4b30-a23d-e2d9de66243f-kube-api-access-6l2ms\") pod \"redhat-marketplace-j8frn\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.569826 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-utilities\") pod \"redhat-marketplace-j8frn\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.671594 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-utilities\") pod \"redhat-marketplace-j8frn\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.671691 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-catalog-content\") pod \"redhat-marketplace-j8frn\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.671792 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l2ms\" (UniqueName: \"kubernetes.io/projected/2702915a-b038-4b30-a23d-e2d9de66243f-kube-api-access-6l2ms\") pod \"redhat-marketplace-j8frn\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.672328 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-utilities\") pod \"redhat-marketplace-j8frn\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.672951 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-catalog-content\") pod \"redhat-marketplace-j8frn\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.698716 4691 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-6l2ms\" (UniqueName: \"kubernetes.io/projected/2702915a-b038-4b30-a23d-e2d9de66243f-kube-api-access-6l2ms\") pod \"redhat-marketplace-j8frn\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.745685 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2v4mj" event={"ID":"cef97f77-31fb-40ea-ba25-e88615fb4b75","Type":"ContainerStarted","Data":"d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af"} Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.787739 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2v4mj" podStartSLOduration=3.304520874 podStartE2EDuration="5.787707435s" podCreationTimestamp="2025-11-24 09:19:34 +0000 UTC" firstStartedPulling="2025-11-24 09:19:36.694614227 +0000 UTC m=+4938.693563476" lastFinishedPulling="2025-11-24 09:19:39.177800788 +0000 UTC m=+4941.176750037" observedRunningTime="2025-11-24 09:19:39.776030333 +0000 UTC m=+4941.774979572" watchObservedRunningTime="2025-11-24 09:19:39.787707435 +0000 UTC m=+4941.786656684" Nov 24 09:19:39 crc kubenswrapper[4691]: I1124 09:19:39.788365 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:40 crc kubenswrapper[4691]: W1124 09:19:40.345070 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2702915a_b038_4b30_a23d_e2d9de66243f.slice/crio-6d5e3162a9f4c7c31f401bce6cb3f81a3aab0946d587c7ef2024efea9c677d5d WatchSource:0}: Error finding container 6d5e3162a9f4c7c31f401bce6cb3f81a3aab0946d587c7ef2024efea9c677d5d: Status 404 returned error can't find the container with id 6d5e3162a9f4c7c31f401bce6cb3f81a3aab0946d587c7ef2024efea9c677d5d Nov 24 09:19:40 crc kubenswrapper[4691]: I1124 09:19:40.348672 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j8frn"] Nov 24 09:19:40 crc kubenswrapper[4691]: I1124 09:19:40.759477 4691 generic.go:334] "Generic (PLEG): container finished" podID="2702915a-b038-4b30-a23d-e2d9de66243f" containerID="b2296338f25c3139273265fcd62104ca57d6aef5f554e648adfb34cc8f1ed2f0" exitCode=0 Nov 24 09:19:40 crc kubenswrapper[4691]: I1124 09:19:40.774951 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8frn" event={"ID":"2702915a-b038-4b30-a23d-e2d9de66243f","Type":"ContainerDied","Data":"b2296338f25c3139273265fcd62104ca57d6aef5f554e648adfb34cc8f1ed2f0"} Nov 24 09:19:40 crc kubenswrapper[4691]: I1124 09:19:40.775022 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8frn" event={"ID":"2702915a-b038-4b30-a23d-e2d9de66243f","Type":"ContainerStarted","Data":"6d5e3162a9f4c7c31f401bce6cb3f81a3aab0946d587c7ef2024efea9c677d5d"} Nov 24 09:19:42 crc kubenswrapper[4691]: I1124 09:19:42.782222 4691 generic.go:334] "Generic (PLEG): container finished" podID="2702915a-b038-4b30-a23d-e2d9de66243f" containerID="793c785903b177c5c0a097ad92056036748246c73bc13c0b346e50677a8a5712" exitCode=0 Nov 24 09:19:42 crc kubenswrapper[4691]: I1124 09:19:42.782334 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8frn" 
event={"ID":"2702915a-b038-4b30-a23d-e2d9de66243f","Type":"ContainerDied","Data":"793c785903b177c5c0a097ad92056036748246c73bc13c0b346e50677a8a5712"} Nov 24 09:19:43 crc kubenswrapper[4691]: I1124 09:19:43.796139 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8frn" event={"ID":"2702915a-b038-4b30-a23d-e2d9de66243f","Type":"ContainerStarted","Data":"ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5"} Nov 24 09:19:43 crc kubenswrapper[4691]: I1124 09:19:43.822788 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j8frn" podStartSLOduration=2.104681436 podStartE2EDuration="4.82276496s" podCreationTimestamp="2025-11-24 09:19:39 +0000 UTC" firstStartedPulling="2025-11-24 09:19:40.76292456 +0000 UTC m=+4942.761873809" lastFinishedPulling="2025-11-24 09:19:43.481008084 +0000 UTC m=+4945.479957333" observedRunningTime="2025-11-24 09:19:43.817320185 +0000 UTC m=+4945.816269424" watchObservedRunningTime="2025-11-24 09:19:43.82276496 +0000 UTC m=+4945.821714199" Nov 24 09:19:45 crc kubenswrapper[4691]: I1124 09:19:45.242229 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:45 crc kubenswrapper[4691]: I1124 09:19:45.242773 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:45 crc kubenswrapper[4691]: I1124 09:19:45.304982 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:45 crc kubenswrapper[4691]: I1124 09:19:45.907825 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:47 crc kubenswrapper[4691]: I1124 09:19:47.442478 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2v4mj"] Nov 24 09:19:47 crc kubenswrapper[4691]: I1124 09:19:47.837655 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2v4mj" podUID="cef97f77-31fb-40ea-ba25-e88615fb4b75" containerName="registry-server" containerID="cri-o://d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af" gracePeriod=2 Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.310638 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.500983 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-catalog-content\") pod \"cef97f77-31fb-40ea-ba25-e88615fb4b75\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.501129 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6lmz\" (UniqueName: \"kubernetes.io/projected/cef97f77-31fb-40ea-ba25-e88615fb4b75-kube-api-access-g6lmz\") pod \"cef97f77-31fb-40ea-ba25-e88615fb4b75\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.501259 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-utilities\") pod \"cef97f77-31fb-40ea-ba25-e88615fb4b75\" (UID: \"cef97f77-31fb-40ea-ba25-e88615fb4b75\") " Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.502475 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-utilities" (OuterVolumeSpecName: "utilities") pod "cef97f77-31fb-40ea-ba25-e88615fb4b75" (UID: "cef97f77-31fb-40ea-ba25-e88615fb4b75"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.514858 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cef97f77-31fb-40ea-ba25-e88615fb4b75-kube-api-access-g6lmz" (OuterVolumeSpecName: "kube-api-access-g6lmz") pod "cef97f77-31fb-40ea-ba25-e88615fb4b75" (UID: "cef97f77-31fb-40ea-ba25-e88615fb4b75"). InnerVolumeSpecName "kube-api-access-g6lmz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.560706 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cef97f77-31fb-40ea-ba25-e88615fb4b75" (UID: "cef97f77-31fb-40ea-ba25-e88615fb4b75"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.603234 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.603848 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6lmz\" (UniqueName: \"kubernetes.io/projected/cef97f77-31fb-40ea-ba25-e88615fb4b75-kube-api-access-g6lmz\") on node \"crc\" DevicePath \"\"" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.603865 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cef97f77-31fb-40ea-ba25-e88615fb4b75-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.849184 4691 generic.go:334] "Generic (PLEG): container finished" podID="cef97f77-31fb-40ea-ba25-e88615fb4b75" containerID="d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af" exitCode=0 Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.849233 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2v4mj" event={"ID":"cef97f77-31fb-40ea-ba25-e88615fb4b75","Type":"ContainerDied","Data":"d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af"} Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.849263 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2v4mj" event={"ID":"cef97f77-31fb-40ea-ba25-e88615fb4b75","Type":"ContainerDied","Data":"9d9e72382a27e7a210e92f358dfc3e51ce1b8c0fdf793264787607d6a923d91c"} Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.849280 4691 scope.go:117] "RemoveContainer" containerID="d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.849414 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2v4mj" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.871717 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2v4mj"] Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.879641 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2v4mj"] Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.880522 4691 scope.go:117] "RemoveContainer" containerID="7ae2e941c8a7e76fa0bbff857b812f2396e069f459f25b38c7d51f56f0050523" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.909626 4691 scope.go:117] "RemoveContainer" containerID="a2de2ceaab828bedd3c3ac86347e138aabfa62cf4a2ff756ebfa98f879e147bd" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.955203 4691 scope.go:117] "RemoveContainer" containerID="d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af" Nov 24 09:19:48 crc kubenswrapper[4691]: E1124 09:19:48.955622 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af\": container with ID starting with d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af not found: ID does not exist" containerID="d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.955672 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af"} err="failed to get container status \"d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af\": rpc error: code = NotFound desc = could not find container \"d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af\": container with ID starting with d678b67ce9014695e5c0d824986346ffdb749d55378d3a160e501adb7f9205af not found: ID does not exist" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.955699 4691 scope.go:117] "RemoveContainer" containerID="7ae2e941c8a7e76fa0bbff857b812f2396e069f459f25b38c7d51f56f0050523" Nov 24 09:19:48 crc kubenswrapper[4691]: E1124 09:19:48.955960 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ae2e941c8a7e76fa0bbff857b812f2396e069f459f25b38c7d51f56f0050523\": container with ID starting with 7ae2e941c8a7e76fa0bbff857b812f2396e069f459f25b38c7d51f56f0050523 not found: ID does not exist" containerID="7ae2e941c8a7e76fa0bbff857b812f2396e069f459f25b38c7d51f56f0050523" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.955994 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ae2e941c8a7e76fa0bbff857b812f2396e069f459f25b38c7d51f56f0050523"} err="failed to get container status \"7ae2e941c8a7e76fa0bbff857b812f2396e069f459f25b38c7d51f56f0050523\": rpc error: code = NotFound desc = could not find container \"7ae2e941c8a7e76fa0bbff857b812f2396e069f459f25b38c7d51f56f0050523\": container with ID starting with 7ae2e941c8a7e76fa0bbff857b812f2396e069f459f25b38c7d51f56f0050523 not found: ID does not exist" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.956021 4691 scope.go:117] "RemoveContainer" containerID="a2de2ceaab828bedd3c3ac86347e138aabfa62cf4a2ff756ebfa98f879e147bd" Nov 24 09:19:48 crc kubenswrapper[4691]: E1124 09:19:48.956297 4691 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a2de2ceaab828bedd3c3ac86347e138aabfa62cf4a2ff756ebfa98f879e147bd\": container with ID starting with a2de2ceaab828bedd3c3ac86347e138aabfa62cf4a2ff756ebfa98f879e147bd not found: ID does not exist" containerID="a2de2ceaab828bedd3c3ac86347e138aabfa62cf4a2ff756ebfa98f879e147bd" Nov 24 09:19:48 crc kubenswrapper[4691]: I1124 09:19:48.956324 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2de2ceaab828bedd3c3ac86347e138aabfa62cf4a2ff756ebfa98f879e147bd"} err="failed to get container status \"a2de2ceaab828bedd3c3ac86347e138aabfa62cf4a2ff756ebfa98f879e147bd\": rpc error: code = NotFound desc = could not find container \"a2de2ceaab828bedd3c3ac86347e138aabfa62cf4a2ff756ebfa98f879e147bd\": container with ID starting with a2de2ceaab828bedd3c3ac86347e138aabfa62cf4a2ff756ebfa98f879e147bd not found: ID does not exist" Nov 24 09:19:49 crc kubenswrapper[4691]: I1124 09:19:49.788861 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:49 crc kubenswrapper[4691]: I1124 09:19:49.789515 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:49 crc kubenswrapper[4691]: I1124 09:19:49.849381 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:49 crc kubenswrapper[4691]: I1124 09:19:49.910309 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:50 crc kubenswrapper[4691]: I1124 09:19:50.772189 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cef97f77-31fb-40ea-ba25-e88615fb4b75" path="/var/lib/kubelet/pods/cef97f77-31fb-40ea-ba25-e88615fb4b75/volumes" Nov 24 09:19:51 crc kubenswrapper[4691]: I1124 09:19:51.089560 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:19:51 crc kubenswrapper[4691]: I1124 09:19:51.089643 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:19:51 crc kubenswrapper[4691]: I1124 09:19:51.643428 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j8frn"] Nov 24 09:19:51 crc kubenswrapper[4691]: I1124 09:19:51.881063 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j8frn" podUID="2702915a-b038-4b30-a23d-e2d9de66243f" containerName="registry-server" containerID="cri-o://ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5" gracePeriod=2 Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.395542 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.579082 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6l2ms\" (UniqueName: \"kubernetes.io/projected/2702915a-b038-4b30-a23d-e2d9de66243f-kube-api-access-6l2ms\") pod \"2702915a-b038-4b30-a23d-e2d9de66243f\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.579351 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-catalog-content\") pod \"2702915a-b038-4b30-a23d-e2d9de66243f\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.579422 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-utilities\") pod \"2702915a-b038-4b30-a23d-e2d9de66243f\" (UID: \"2702915a-b038-4b30-a23d-e2d9de66243f\") " Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.580398 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-utilities" (OuterVolumeSpecName: "utilities") pod "2702915a-b038-4b30-a23d-e2d9de66243f" (UID: "2702915a-b038-4b30-a23d-e2d9de66243f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.585131 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2702915a-b038-4b30-a23d-e2d9de66243f-kube-api-access-6l2ms" (OuterVolumeSpecName: "kube-api-access-6l2ms") pod "2702915a-b038-4b30-a23d-e2d9de66243f" (UID: "2702915a-b038-4b30-a23d-e2d9de66243f"). InnerVolumeSpecName "kube-api-access-6l2ms". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.601643 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2702915a-b038-4b30-a23d-e2d9de66243f" (UID: "2702915a-b038-4b30-a23d-e2d9de66243f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.682575 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.682614 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2702915a-b038-4b30-a23d-e2d9de66243f-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.682630 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6l2ms\" (UniqueName: \"kubernetes.io/projected/2702915a-b038-4b30-a23d-e2d9de66243f-kube-api-access-6l2ms\") on node \"crc\" DevicePath \"\"" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.892980 4691 generic.go:334] "Generic (PLEG): container finished" podID="2702915a-b038-4b30-a23d-e2d9de66243f" containerID="ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5" exitCode=0 Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.893062 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8frn" event={"ID":"2702915a-b038-4b30-a23d-e2d9de66243f","Type":"ContainerDied","Data":"ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5"} Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.893275 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j8frn" event={"ID":"2702915a-b038-4b30-a23d-e2d9de66243f","Type":"ContainerDied","Data":"6d5e3162a9f4c7c31f401bce6cb3f81a3aab0946d587c7ef2024efea9c677d5d"} Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.893300 4691 scope.go:117] "RemoveContainer" containerID="ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.893077 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j8frn" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.913990 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j8frn"] Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.920471 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j8frn"] Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.920993 4691 scope.go:117] "RemoveContainer" containerID="793c785903b177c5c0a097ad92056036748246c73bc13c0b346e50677a8a5712" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.953326 4691 scope.go:117] "RemoveContainer" containerID="b2296338f25c3139273265fcd62104ca57d6aef5f554e648adfb34cc8f1ed2f0" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.994932 4691 scope.go:117] "RemoveContainer" containerID="ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5" Nov 24 09:19:52 crc kubenswrapper[4691]: E1124 09:19:52.995326 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5\": container with ID starting with ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5 not found: ID does not exist" containerID="ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.995381 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5"} err="failed to get container status \"ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5\": rpc error: code = NotFound desc = could not find container \"ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5\": container with ID starting with ef226bb626dc611fa28490cc91d9d7b8f2102450c73fa6652cfc91afe68d7ae5 not found: ID does not exist" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.995420 4691 scope.go:117] "RemoveContainer" containerID="793c785903b177c5c0a097ad92056036748246c73bc13c0b346e50677a8a5712" Nov 24 09:19:52 crc kubenswrapper[4691]: E1124 09:19:52.995922 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"793c785903b177c5c0a097ad92056036748246c73bc13c0b346e50677a8a5712\": container with ID starting with 793c785903b177c5c0a097ad92056036748246c73bc13c0b346e50677a8a5712 not found: ID does not exist" containerID="793c785903b177c5c0a097ad92056036748246c73bc13c0b346e50677a8a5712" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.995976 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"793c785903b177c5c0a097ad92056036748246c73bc13c0b346e50677a8a5712"} err="failed to get container status \"793c785903b177c5c0a097ad92056036748246c73bc13c0b346e50677a8a5712\": rpc error: code = NotFound desc = could not find container \"793c785903b177c5c0a097ad92056036748246c73bc13c0b346e50677a8a5712\": container with ID starting with 793c785903b177c5c0a097ad92056036748246c73bc13c0b346e50677a8a5712 not found: ID does not exist" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.996012 4691 scope.go:117] "RemoveContainer" containerID="b2296338f25c3139273265fcd62104ca57d6aef5f554e648adfb34cc8f1ed2f0" Nov 24 09:19:52 crc kubenswrapper[4691]: E1124 09:19:52.996373 4691 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"b2296338f25c3139273265fcd62104ca57d6aef5f554e648adfb34cc8f1ed2f0\": container with ID starting with b2296338f25c3139273265fcd62104ca57d6aef5f554e648adfb34cc8f1ed2f0 not found: ID does not exist" containerID="b2296338f25c3139273265fcd62104ca57d6aef5f554e648adfb34cc8f1ed2f0" Nov 24 09:19:52 crc kubenswrapper[4691]: I1124 09:19:52.996413 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2296338f25c3139273265fcd62104ca57d6aef5f554e648adfb34cc8f1ed2f0"} err="failed to get container status \"b2296338f25c3139273265fcd62104ca57d6aef5f554e648adfb34cc8f1ed2f0\": rpc error: code = NotFound desc = could not find container \"b2296338f25c3139273265fcd62104ca57d6aef5f554e648adfb34cc8f1ed2f0\": container with ID starting with b2296338f25c3139273265fcd62104ca57d6aef5f554e648adfb34cc8f1ed2f0 not found: ID does not exist" Nov 24 09:19:54 crc kubenswrapper[4691]: I1124 09:19:54.773907 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2702915a-b038-4b30-a23d-e2d9de66243f" path="/var/lib/kubelet/pods/2702915a-b038-4b30-a23d-e2d9de66243f/volumes" Nov 24 09:20:21 crc kubenswrapper[4691]: I1124 09:20:21.089216 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:20:21 crc kubenswrapper[4691]: I1124 09:20:21.089908 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:20:51 crc kubenswrapper[4691]: I1124 09:20:51.089005 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:20:51 crc kubenswrapper[4691]: I1124 09:20:51.089745 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:20:51 crc kubenswrapper[4691]: I1124 09:20:51.089796 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 09:20:51 crc kubenswrapper[4691]: I1124 09:20:51.090678 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 09:20:51 crc kubenswrapper[4691]: I1124 09:20:51.090741 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" 
podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" gracePeriod=600 Nov 24 09:20:51 crc kubenswrapper[4691]: E1124 09:20:51.220406 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:20:51 crc kubenswrapper[4691]: I1124 09:20:51.305097 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" exitCode=0 Nov 24 09:20:51 crc kubenswrapper[4691]: I1124 09:20:51.305148 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26"} Nov 24 09:20:51 crc kubenswrapper[4691]: I1124 09:20:51.305188 4691 scope.go:117] "RemoveContainer" containerID="c3bc47ddb7c02824987171379a4855ed137dcbbad67fcea843b9fbf709d1019a" Nov 24 09:20:51 crc kubenswrapper[4691]: I1124 09:20:51.306071 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:20:51 crc kubenswrapper[4691]: E1124 09:20:51.306464 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:21:02 crc kubenswrapper[4691]: I1124 09:21:02.761314 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:21:02 crc kubenswrapper[4691]: E1124 09:21:02.762347 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:21:16 crc kubenswrapper[4691]: I1124 09:21:16.775847 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:21:16 crc kubenswrapper[4691]: E1124 09:21:16.777330 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:21:29 crc kubenswrapper[4691]: I1124 09:21:29.761561 4691 scope.go:117] 
"RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:21:29 crc kubenswrapper[4691]: E1124 09:21:29.763267 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:21:41 crc kubenswrapper[4691]: I1124 09:21:41.760624 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:21:41 crc kubenswrapper[4691]: E1124 09:21:41.761294 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:21:54 crc kubenswrapper[4691]: I1124 09:21:54.761220 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:21:54 crc kubenswrapper[4691]: E1124 09:21:54.762522 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:22:08 crc kubenswrapper[4691]: I1124 09:22:08.793002 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:22:08 crc kubenswrapper[4691]: E1124 09:22:08.794184 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:22:22 crc kubenswrapper[4691]: I1124 09:22:22.760786 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:22:22 crc kubenswrapper[4691]: E1124 09:22:22.762013 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:22:34 crc kubenswrapper[4691]: I1124 09:22:34.761168 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:22:34 crc kubenswrapper[4691]: E1124 09:22:34.762707 4691 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:22:48 crc kubenswrapper[4691]: I1124 09:22:48.770721 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:22:48 crc kubenswrapper[4691]: E1124 09:22:48.771985 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.740540 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dxnpr"] Nov 24 09:22:57 crc kubenswrapper[4691]: E1124 09:22:57.741792 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cef97f77-31fb-40ea-ba25-e88615fb4b75" containerName="registry-server" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.741811 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="cef97f77-31fb-40ea-ba25-e88615fb4b75" containerName="registry-server" Nov 24 09:22:57 crc kubenswrapper[4691]: E1124 09:22:57.741826 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2702915a-b038-4b30-a23d-e2d9de66243f" containerName="extract-utilities" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.741836 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="2702915a-b038-4b30-a23d-e2d9de66243f" containerName="extract-utilities" Nov 24 09:22:57 crc kubenswrapper[4691]: E1124 09:22:57.741864 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cef97f77-31fb-40ea-ba25-e88615fb4b75" containerName="extract-content" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.741873 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="cef97f77-31fb-40ea-ba25-e88615fb4b75" containerName="extract-content" Nov 24 09:22:57 crc kubenswrapper[4691]: E1124 09:22:57.741894 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2702915a-b038-4b30-a23d-e2d9de66243f" containerName="registry-server" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.741902 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="2702915a-b038-4b30-a23d-e2d9de66243f" containerName="registry-server" Nov 24 09:22:57 crc kubenswrapper[4691]: E1124 09:22:57.741915 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cef97f77-31fb-40ea-ba25-e88615fb4b75" containerName="extract-utilities" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.741922 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="cef97f77-31fb-40ea-ba25-e88615fb4b75" containerName="extract-utilities" Nov 24 09:22:57 crc kubenswrapper[4691]: E1124 09:22:57.741945 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2702915a-b038-4b30-a23d-e2d9de66243f" containerName="extract-content" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.741952 4691 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="2702915a-b038-4b30-a23d-e2d9de66243f" containerName="extract-content" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.742414 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="2702915a-b038-4b30-a23d-e2d9de66243f" containerName="registry-server" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.742439 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="cef97f77-31fb-40ea-ba25-e88615fb4b75" containerName="registry-server" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.744114 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.760985 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dxnpr"] Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.881935 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d053d137-22fe-4850-8694-717346625cf6-catalog-content\") pod \"redhat-operators-dxnpr\" (UID: \"d053d137-22fe-4850-8694-717346625cf6\") " pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.882027 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d053d137-22fe-4850-8694-717346625cf6-utilities\") pod \"redhat-operators-dxnpr\" (UID: \"d053d137-22fe-4850-8694-717346625cf6\") " pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.882122 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqv4x\" (UniqueName: \"kubernetes.io/projected/d053d137-22fe-4850-8694-717346625cf6-kube-api-access-nqv4x\") pod \"redhat-operators-dxnpr\" (UID: \"d053d137-22fe-4850-8694-717346625cf6\") " pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.983820 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d053d137-22fe-4850-8694-717346625cf6-catalog-content\") pod \"redhat-operators-dxnpr\" (UID: \"d053d137-22fe-4850-8694-717346625cf6\") " pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.983889 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d053d137-22fe-4850-8694-717346625cf6-utilities\") pod \"redhat-operators-dxnpr\" (UID: \"d053d137-22fe-4850-8694-717346625cf6\") " pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.983935 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqv4x\" (UniqueName: \"kubernetes.io/projected/d053d137-22fe-4850-8694-717346625cf6-kube-api-access-nqv4x\") pod \"redhat-operators-dxnpr\" (UID: \"d053d137-22fe-4850-8694-717346625cf6\") " pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.984720 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d053d137-22fe-4850-8694-717346625cf6-catalog-content\") pod \"redhat-operators-dxnpr\" (UID: 
\"d053d137-22fe-4850-8694-717346625cf6\") " pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:22:57 crc kubenswrapper[4691]: I1124 09:22:57.984948 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d053d137-22fe-4850-8694-717346625cf6-utilities\") pod \"redhat-operators-dxnpr\" (UID: \"d053d137-22fe-4850-8694-717346625cf6\") " pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:22:58 crc kubenswrapper[4691]: I1124 09:22:58.009041 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqv4x\" (UniqueName: \"kubernetes.io/projected/d053d137-22fe-4850-8694-717346625cf6-kube-api-access-nqv4x\") pod \"redhat-operators-dxnpr\" (UID: \"d053d137-22fe-4850-8694-717346625cf6\") " pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:22:58 crc kubenswrapper[4691]: I1124 09:22:58.073970 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:22:58 crc kubenswrapper[4691]: I1124 09:22:58.590410 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dxnpr"] Nov 24 09:22:59 crc kubenswrapper[4691]: I1124 09:22:59.556966 4691 generic.go:334] "Generic (PLEG): container finished" podID="d053d137-22fe-4850-8694-717346625cf6" containerID="97d98913c500bcc4b1b26c901ddddb27b5c656fa910901dbfac4aa5949cb6332" exitCode=0 Nov 24 09:22:59 crc kubenswrapper[4691]: I1124 09:22:59.557052 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxnpr" event={"ID":"d053d137-22fe-4850-8694-717346625cf6","Type":"ContainerDied","Data":"97d98913c500bcc4b1b26c901ddddb27b5c656fa910901dbfac4aa5949cb6332"} Nov 24 09:22:59 crc kubenswrapper[4691]: I1124 09:22:59.557288 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxnpr" event={"ID":"d053d137-22fe-4850-8694-717346625cf6","Type":"ContainerStarted","Data":"cec449daebd459f5352f0ec372ada1f9fc0777ea07b48c563090d067454b0240"} Nov 24 09:23:01 crc kubenswrapper[4691]: I1124 09:23:01.761026 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:23:01 crc kubenswrapper[4691]: E1124 09:23:01.761735 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:23:09 crc kubenswrapper[4691]: I1124 09:23:09.654710 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxnpr" event={"ID":"d053d137-22fe-4850-8694-717346625cf6","Type":"ContainerStarted","Data":"f7844be9dfc4967810ec68c27ce2c9b33d6cb9383a2751cf8a08974be2f6c0bc"} Nov 24 09:23:12 crc kubenswrapper[4691]: I1124 09:23:12.692109 4691 generic.go:334] "Generic (PLEG): container finished" podID="d053d137-22fe-4850-8694-717346625cf6" containerID="f7844be9dfc4967810ec68c27ce2c9b33d6cb9383a2751cf8a08974be2f6c0bc" exitCode=0 Nov 24 09:23:12 crc kubenswrapper[4691]: I1124 09:23:12.692224 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxnpr" 
event={"ID":"d053d137-22fe-4850-8694-717346625cf6","Type":"ContainerDied","Data":"f7844be9dfc4967810ec68c27ce2c9b33d6cb9383a2751cf8a08974be2f6c0bc"} Nov 24 09:23:13 crc kubenswrapper[4691]: I1124 09:23:13.701839 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dxnpr" event={"ID":"d053d137-22fe-4850-8694-717346625cf6","Type":"ContainerStarted","Data":"b678426d1dd74e081f13bb57fe18b61fddc401f96c2fe29c202932e86371b7b1"} Nov 24 09:23:13 crc kubenswrapper[4691]: I1124 09:23:13.727125 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dxnpr" podStartSLOduration=3.071851158 podStartE2EDuration="16.7271042s" podCreationTimestamp="2025-11-24 09:22:57 +0000 UTC" firstStartedPulling="2025-11-24 09:22:59.559462764 +0000 UTC m=+5141.558412023" lastFinishedPulling="2025-11-24 09:23:13.214715786 +0000 UTC m=+5155.213665065" observedRunningTime="2025-11-24 09:23:13.7207757 +0000 UTC m=+5155.719724949" watchObservedRunningTime="2025-11-24 09:23:13.7271042 +0000 UTC m=+5155.726053459" Nov 24 09:23:14 crc kubenswrapper[4691]: I1124 09:23:14.760725 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:23:14 crc kubenswrapper[4691]: E1124 09:23:14.761358 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:23:18 crc kubenswrapper[4691]: I1124 09:23:18.074373 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:23:18 crc kubenswrapper[4691]: I1124 09:23:18.074786 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:23:19 crc kubenswrapper[4691]: I1124 09:23:19.137972 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dxnpr" podUID="d053d137-22fe-4850-8694-717346625cf6" containerName="registry-server" probeResult="failure" output=< Nov 24 09:23:19 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 09:23:19 crc kubenswrapper[4691]: > Nov 24 09:23:25 crc kubenswrapper[4691]: I1124 09:23:25.761085 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:23:25 crc kubenswrapper[4691]: E1124 09:23:25.762195 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:23:28 crc kubenswrapper[4691]: I1124 09:23:28.396986 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:23:28 crc kubenswrapper[4691]: I1124 09:23:28.446560 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-operators-dxnpr" Nov 24 09:23:28 crc kubenswrapper[4691]: I1124 09:23:28.780928 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dxnpr"] Nov 24 09:23:28 crc kubenswrapper[4691]: I1124 09:23:28.936821 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gj4c6"] Nov 24 09:23:28 crc kubenswrapper[4691]: I1124 09:23:28.937093 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gj4c6" podUID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerName="registry-server" containerID="cri-o://2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c" gracePeriod=2 Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.470311 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.627965 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-catalog-content\") pod \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.628050 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8t6g\" (UniqueName: \"kubernetes.io/projected/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-kube-api-access-c8t6g\") pod \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.628124 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-utilities\") pod \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\" (UID: \"7b6731c0-8185-4b19-8f1e-c3a6b85b972e\") " Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.630320 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-utilities" (OuterVolumeSpecName: "utilities") pod "7b6731c0-8185-4b19-8f1e-c3a6b85b972e" (UID: "7b6731c0-8185-4b19-8f1e-c3a6b85b972e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.635838 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-kube-api-access-c8t6g" (OuterVolumeSpecName: "kube-api-access-c8t6g") pod "7b6731c0-8185-4b19-8f1e-c3a6b85b972e" (UID: "7b6731c0-8185-4b19-8f1e-c3a6b85b972e"). InnerVolumeSpecName "kube-api-access-c8t6g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.730960 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8t6g\" (UniqueName: \"kubernetes.io/projected/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-kube-api-access-c8t6g\") on node \"crc\" DevicePath \"\"" Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.731299 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.744603 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7b6731c0-8185-4b19-8f1e-c3a6b85b972e" (UID: "7b6731c0-8185-4b19-8f1e-c3a6b85b972e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.833649 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b6731c0-8185-4b19-8f1e-c3a6b85b972e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.913390 4691 generic.go:334] "Generic (PLEG): container finished" podID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerID="2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c" exitCode=0 Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.913488 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gj4c6" Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.913509 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gj4c6" event={"ID":"7b6731c0-8185-4b19-8f1e-c3a6b85b972e","Type":"ContainerDied","Data":"2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c"} Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.913563 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gj4c6" event={"ID":"7b6731c0-8185-4b19-8f1e-c3a6b85b972e","Type":"ContainerDied","Data":"ad153e7c07a9ecfc800af2bc9fb2804a2759363817cdf6b901533e90163d9de2"} Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.913588 4691 scope.go:117] "RemoveContainer" containerID="2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c" Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.957428 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gj4c6"] Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.957867 4691 scope.go:117] "RemoveContainer" containerID="22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094" Nov 24 09:23:29 crc kubenswrapper[4691]: I1124 09:23:29.968626 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gj4c6"] Nov 24 09:23:30 crc kubenswrapper[4691]: I1124 09:23:30.016007 4691 scope.go:117] "RemoveContainer" containerID="411071d5562a930dd40608f82bd31eab6599c9cb4637dde67706c7dd36a03d63" Nov 24 09:23:30 crc kubenswrapper[4691]: I1124 09:23:30.043942 4691 scope.go:117] "RemoveContainer" containerID="2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c" Nov 24 09:23:30 crc kubenswrapper[4691]: E1124 09:23:30.044720 4691 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c\": container with ID starting with 2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c not found: ID does not exist" containerID="2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c" Nov 24 09:23:30 crc kubenswrapper[4691]: I1124 09:23:30.044760 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c"} err="failed to get container status \"2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c\": rpc error: code = NotFound desc = could not find container \"2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c\": container with ID starting with 2551c0d3fb0952447c6b18e9461be9a2e887851b6ba394801deb138fddd13f2c not found: ID does not exist" Nov 24 09:23:30 crc kubenswrapper[4691]: I1124 09:23:30.044786 4691 scope.go:117] "RemoveContainer" containerID="22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094" Nov 24 09:23:30 crc kubenswrapper[4691]: E1124 09:23:30.045061 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094\": container with ID starting with 22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094 not found: ID does not exist" containerID="22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094" Nov 24 09:23:30 crc kubenswrapper[4691]: I1124 09:23:30.045107 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094"} err="failed to get container status \"22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094\": rpc error: code = NotFound desc = could not find container \"22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094\": container with ID starting with 22fed260e0c485225c7d67e7f5cbc7370f0983172d1e763c3e43e1745235e094 not found: ID does not exist" Nov 24 09:23:30 crc kubenswrapper[4691]: I1124 09:23:30.045151 4691 scope.go:117] "RemoveContainer" containerID="411071d5562a930dd40608f82bd31eab6599c9cb4637dde67706c7dd36a03d63" Nov 24 09:23:30 crc kubenswrapper[4691]: E1124 09:23:30.045415 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"411071d5562a930dd40608f82bd31eab6599c9cb4637dde67706c7dd36a03d63\": container with ID starting with 411071d5562a930dd40608f82bd31eab6599c9cb4637dde67706c7dd36a03d63 not found: ID does not exist" containerID="411071d5562a930dd40608f82bd31eab6599c9cb4637dde67706c7dd36a03d63" Nov 24 09:23:30 crc kubenswrapper[4691]: I1124 09:23:30.045439 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"411071d5562a930dd40608f82bd31eab6599c9cb4637dde67706c7dd36a03d63"} err="failed to get container status \"411071d5562a930dd40608f82bd31eab6599c9cb4637dde67706c7dd36a03d63\": rpc error: code = NotFound desc = could not find container \"411071d5562a930dd40608f82bd31eab6599c9cb4637dde67706c7dd36a03d63\": container with ID starting with 411071d5562a930dd40608f82bd31eab6599c9cb4637dde67706c7dd36a03d63 not found: ID does not exist" Nov 24 09:23:30 crc kubenswrapper[4691]: I1124 09:23:30.774042 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" path="/var/lib/kubelet/pods/7b6731c0-8185-4b19-8f1e-c3a6b85b972e/volumes" Nov 24 09:23:37 crc kubenswrapper[4691]: I1124 09:23:37.761104 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:23:37 crc kubenswrapper[4691]: E1124 09:23:37.762284 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:23:51 crc kubenswrapper[4691]: I1124 09:23:51.761283 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:23:51 crc kubenswrapper[4691]: E1124 09:23:51.762086 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:24:02 crc kubenswrapper[4691]: I1124 09:24:02.760413 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:24:02 crc kubenswrapper[4691]: E1124 09:24:02.762346 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.108676 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6kfk6"] Nov 24 09:24:09 crc kubenswrapper[4691]: E1124 09:24:09.109570 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerName="extract-content" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.109583 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerName="extract-content" Nov 24 09:24:09 crc kubenswrapper[4691]: E1124 09:24:09.109615 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerName="extract-utilities" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.109621 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerName="extract-utilities" Nov 24 09:24:09 crc kubenswrapper[4691]: E1124 09:24:09.109633 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerName="registry-server" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.109639 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerName="registry-server" Nov 24 09:24:09 crc kubenswrapper[4691]: 
I1124 09:24:09.109820 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b6731c0-8185-4b19-8f1e-c3a6b85b972e" containerName="registry-server" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.111119 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.121613 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6kfk6"] Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.244114 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-catalog-content\") pod \"community-operators-6kfk6\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.244284 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j29m4\" (UniqueName: \"kubernetes.io/projected/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-kube-api-access-j29m4\") pod \"community-operators-6kfk6\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.244314 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-utilities\") pod \"community-operators-6kfk6\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.346554 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j29m4\" (UniqueName: \"kubernetes.io/projected/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-kube-api-access-j29m4\") pod \"community-operators-6kfk6\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.346615 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-utilities\") pod \"community-operators-6kfk6\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.346684 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-catalog-content\") pod \"community-operators-6kfk6\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.347108 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-catalog-content\") pod \"community-operators-6kfk6\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.347695 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-utilities\") pod \"community-operators-6kfk6\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.366302 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j29m4\" (UniqueName: \"kubernetes.io/projected/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-kube-api-access-j29m4\") pod \"community-operators-6kfk6\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.429209 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:09 crc kubenswrapper[4691]: I1124 09:24:09.914396 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6kfk6"] Nov 24 09:24:10 crc kubenswrapper[4691]: I1124 09:24:10.306914 4691 generic.go:334] "Generic (PLEG): container finished" podID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" containerID="1a49921a66f0b34c3ab141dcb43c426f971c97bcaf026e37c40edc318eebbf60" exitCode=0 Nov 24 09:24:10 crc kubenswrapper[4691]: I1124 09:24:10.308574 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kfk6" event={"ID":"0b1bf1ae-07e0-48b3-8729-e6e0751638b1","Type":"ContainerDied","Data":"1a49921a66f0b34c3ab141dcb43c426f971c97bcaf026e37c40edc318eebbf60"} Nov 24 09:24:10 crc kubenswrapper[4691]: I1124 09:24:10.308746 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kfk6" event={"ID":"0b1bf1ae-07e0-48b3-8729-e6e0751638b1","Type":"ContainerStarted","Data":"6508850569bb7eb09f53ff68c08878192ce99635a7a01c90ec633df9c477225f"} Nov 24 09:24:13 crc kubenswrapper[4691]: I1124 09:24:13.331616 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kfk6" event={"ID":"0b1bf1ae-07e0-48b3-8729-e6e0751638b1","Type":"ContainerStarted","Data":"ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc"} Nov 24 09:24:13 crc kubenswrapper[4691]: I1124 09:24:13.760622 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:24:13 crc kubenswrapper[4691]: E1124 09:24:13.761016 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:24:14 crc kubenswrapper[4691]: I1124 09:24:14.345501 4691 generic.go:334] "Generic (PLEG): container finished" podID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" containerID="ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc" exitCode=0 Nov 24 09:24:14 crc kubenswrapper[4691]: I1124 09:24:14.345824 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kfk6" event={"ID":"0b1bf1ae-07e0-48b3-8729-e6e0751638b1","Type":"ContainerDied","Data":"ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc"} Nov 24 09:24:15 crc kubenswrapper[4691]: I1124 09:24:15.354591 4691 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/community-operators-6kfk6" event={"ID":"0b1bf1ae-07e0-48b3-8729-e6e0751638b1","Type":"ContainerStarted","Data":"851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d"} Nov 24 09:24:15 crc kubenswrapper[4691]: I1124 09:24:15.373498 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6kfk6" podStartSLOduration=1.791987374 podStartE2EDuration="6.373479489s" podCreationTimestamp="2025-11-24 09:24:09 +0000 UTC" firstStartedPulling="2025-11-24 09:24:10.308495113 +0000 UTC m=+5212.307444362" lastFinishedPulling="2025-11-24 09:24:14.889987228 +0000 UTC m=+5216.888936477" observedRunningTime="2025-11-24 09:24:15.369611169 +0000 UTC m=+5217.368560418" watchObservedRunningTime="2025-11-24 09:24:15.373479489 +0000 UTC m=+5217.372428738" Nov 24 09:24:19 crc kubenswrapper[4691]: I1124 09:24:19.438551 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:19 crc kubenswrapper[4691]: I1124 09:24:19.439162 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:19 crc kubenswrapper[4691]: I1124 09:24:19.490911 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:20 crc kubenswrapper[4691]: I1124 09:24:20.461553 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:20 crc kubenswrapper[4691]: I1124 09:24:20.506361 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6kfk6"] Nov 24 09:24:22 crc kubenswrapper[4691]: I1124 09:24:22.418776 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6kfk6" podUID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" containerName="registry-server" containerID="cri-o://851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d" gracePeriod=2 Nov 24 09:24:22 crc kubenswrapper[4691]: I1124 09:24:22.929750 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.040352 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-catalog-content\") pod \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.040491 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j29m4\" (UniqueName: \"kubernetes.io/projected/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-kube-api-access-j29m4\") pod \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.041603 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-utilities\") pod \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\" (UID: \"0b1bf1ae-07e0-48b3-8729-e6e0751638b1\") " Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.042536 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-utilities" (OuterVolumeSpecName: "utilities") pod "0b1bf1ae-07e0-48b3-8729-e6e0751638b1" (UID: "0b1bf1ae-07e0-48b3-8729-e6e0751638b1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.051902 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-kube-api-access-j29m4" (OuterVolumeSpecName: "kube-api-access-j29m4") pod "0b1bf1ae-07e0-48b3-8729-e6e0751638b1" (UID: "0b1bf1ae-07e0-48b3-8729-e6e0751638b1"). InnerVolumeSpecName "kube-api-access-j29m4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.098573 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b1bf1ae-07e0-48b3-8729-e6e0751638b1" (UID: "0b1bf1ae-07e0-48b3-8729-e6e0751638b1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.143259 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.143296 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j29m4\" (UniqueName: \"kubernetes.io/projected/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-kube-api-access-j29m4\") on node \"crc\" DevicePath \"\"" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.143365 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b1bf1ae-07e0-48b3-8729-e6e0751638b1-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.427184 4691 generic.go:334] "Generic (PLEG): container finished" podID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" containerID="851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d" exitCode=0 Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.427231 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kfk6" event={"ID":"0b1bf1ae-07e0-48b3-8729-e6e0751638b1","Type":"ContainerDied","Data":"851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d"} Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.427275 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6kfk6" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.427293 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kfk6" event={"ID":"0b1bf1ae-07e0-48b3-8729-e6e0751638b1","Type":"ContainerDied","Data":"6508850569bb7eb09f53ff68c08878192ce99635a7a01c90ec633df9c477225f"} Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.427320 4691 scope.go:117] "RemoveContainer" containerID="851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.466659 4691 scope.go:117] "RemoveContainer" containerID="ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.503514 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6kfk6"] Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.529617 4691 scope.go:117] "RemoveContainer" containerID="1a49921a66f0b34c3ab141dcb43c426f971c97bcaf026e37c40edc318eebbf60" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.535763 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6kfk6"] Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.617762 4691 scope.go:117] "RemoveContainer" containerID="851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d" Nov 24 09:24:23 crc kubenswrapper[4691]: E1124 09:24:23.618136 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d\": container with ID starting with 851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d not found: ID does not exist" containerID="851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.618162 
4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d"} err="failed to get container status \"851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d\": rpc error: code = NotFound desc = could not find container \"851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d\": container with ID starting with 851a6e499cbda074488adb48f9d86065f8d0beeb03641331385e847637f9242d not found: ID does not exist" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.618183 4691 scope.go:117] "RemoveContainer" containerID="ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc" Nov 24 09:24:23 crc kubenswrapper[4691]: E1124 09:24:23.618468 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc\": container with ID starting with ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc not found: ID does not exist" containerID="ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.618510 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc"} err="failed to get container status \"ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc\": rpc error: code = NotFound desc = could not find container \"ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc\": container with ID starting with ae15b9e5a5358d7b29a494fd4089ff677ec9b0e1abaac12f5541e5df159207dc not found: ID does not exist" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.618540 4691 scope.go:117] "RemoveContainer" containerID="1a49921a66f0b34c3ab141dcb43c426f971c97bcaf026e37c40edc318eebbf60" Nov 24 09:24:23 crc kubenswrapper[4691]: E1124 09:24:23.618807 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a49921a66f0b34c3ab141dcb43c426f971c97bcaf026e37c40edc318eebbf60\": container with ID starting with 1a49921a66f0b34c3ab141dcb43c426f971c97bcaf026e37c40edc318eebbf60 not found: ID does not exist" containerID="1a49921a66f0b34c3ab141dcb43c426f971c97bcaf026e37c40edc318eebbf60" Nov 24 09:24:23 crc kubenswrapper[4691]: I1124 09:24:23.618830 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a49921a66f0b34c3ab141dcb43c426f971c97bcaf026e37c40edc318eebbf60"} err="failed to get container status \"1a49921a66f0b34c3ab141dcb43c426f971c97bcaf026e37c40edc318eebbf60\": rpc error: code = NotFound desc = could not find container \"1a49921a66f0b34c3ab141dcb43c426f971c97bcaf026e37c40edc318eebbf60\": container with ID starting with 1a49921a66f0b34c3ab141dcb43c426f971c97bcaf026e37c40edc318eebbf60 not found: ID does not exist" Nov 24 09:24:23 crc kubenswrapper[4691]: E1124 09:24:23.649577 4691 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b1bf1ae_07e0_48b3_8729_e6e0751638b1.slice\": RecentStats: unable to find data in memory cache]" Nov 24 09:24:24 crc kubenswrapper[4691]: I1124 09:24:24.761569 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:24:24 crc 
kubenswrapper[4691]: E1124 09:24:24.762227 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:24:24 crc kubenswrapper[4691]: I1124 09:24:24.774294 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" path="/var/lib/kubelet/pods/0b1bf1ae-07e0-48b3-8729-e6e0751638b1/volumes" Nov 24 09:24:36 crc kubenswrapper[4691]: I1124 09:24:36.761383 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:24:36 crc kubenswrapper[4691]: E1124 09:24:36.762840 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:24:48 crc kubenswrapper[4691]: I1124 09:24:48.774531 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:24:48 crc kubenswrapper[4691]: E1124 09:24:48.775658 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:25:01 crc kubenswrapper[4691]: I1124 09:25:01.761885 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:25:01 crc kubenswrapper[4691]: E1124 09:25:01.763171 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.325509 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qczfr/must-gather-fx258"] Nov 24 09:25:08 crc kubenswrapper[4691]: E1124 09:25:08.326598 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" containerName="registry-server" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.326619 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" containerName="registry-server" Nov 24 09:25:08 crc kubenswrapper[4691]: E1124 09:25:08.326647 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" containerName="extract-content" Nov 24 09:25:08 crc kubenswrapper[4691]: 
I1124 09:25:08.326656 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" containerName="extract-content" Nov 24 09:25:08 crc kubenswrapper[4691]: E1124 09:25:08.326670 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" containerName="extract-utilities" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.326679 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" containerName="extract-utilities" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.326994 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b1bf1ae-07e0-48b3-8729-e6e0751638b1" containerName="registry-server" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.328256 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qczfr/must-gather-fx258" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.330404 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-qczfr"/"openshift-service-ca.crt" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.330769 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-qczfr"/"kube-root-ca.crt" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.342570 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-qczfr/must-gather-fx258"] Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.396781 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6rcj\" (UniqueName: \"kubernetes.io/projected/295248d6-7aed-4269-ad13-efd75ab5499b-kube-api-access-w6rcj\") pod \"must-gather-fx258\" (UID: \"295248d6-7aed-4269-ad13-efd75ab5499b\") " pod="openshift-must-gather-qczfr/must-gather-fx258" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.396830 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/295248d6-7aed-4269-ad13-efd75ab5499b-must-gather-output\") pod \"must-gather-fx258\" (UID: \"295248d6-7aed-4269-ad13-efd75ab5499b\") " pod="openshift-must-gather-qczfr/must-gather-fx258" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.498875 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6rcj\" (UniqueName: \"kubernetes.io/projected/295248d6-7aed-4269-ad13-efd75ab5499b-kube-api-access-w6rcj\") pod \"must-gather-fx258\" (UID: \"295248d6-7aed-4269-ad13-efd75ab5499b\") " pod="openshift-must-gather-qczfr/must-gather-fx258" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.498941 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/295248d6-7aed-4269-ad13-efd75ab5499b-must-gather-output\") pod \"must-gather-fx258\" (UID: \"295248d6-7aed-4269-ad13-efd75ab5499b\") " pod="openshift-must-gather-qczfr/must-gather-fx258" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.499600 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/295248d6-7aed-4269-ad13-efd75ab5499b-must-gather-output\") pod \"must-gather-fx258\" (UID: \"295248d6-7aed-4269-ad13-efd75ab5499b\") " pod="openshift-must-gather-qczfr/must-gather-fx258" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 
09:25:08.519199 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6rcj\" (UniqueName: \"kubernetes.io/projected/295248d6-7aed-4269-ad13-efd75ab5499b-kube-api-access-w6rcj\") pod \"must-gather-fx258\" (UID: \"295248d6-7aed-4269-ad13-efd75ab5499b\") " pod="openshift-must-gather-qczfr/must-gather-fx258" Nov 24 09:25:08 crc kubenswrapper[4691]: I1124 09:25:08.660432 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qczfr/must-gather-fx258" Nov 24 09:25:09 crc kubenswrapper[4691]: I1124 09:25:09.144141 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-qczfr/must-gather-fx258"] Nov 24 09:25:09 crc kubenswrapper[4691]: I1124 09:25:09.161241 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 09:25:09 crc kubenswrapper[4691]: I1124 09:25:09.851767 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/must-gather-fx258" event={"ID":"295248d6-7aed-4269-ad13-efd75ab5499b","Type":"ContainerStarted","Data":"711b986ec803abccbbc05da28c120d395bac43cd6cb751f15ab620410241645e"} Nov 24 09:25:12 crc kubenswrapper[4691]: I1124 09:25:12.762274 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:25:12 crc kubenswrapper[4691]: E1124 09:25:12.763655 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:25:18 crc kubenswrapper[4691]: I1124 09:25:18.994043 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/must-gather-fx258" event={"ID":"295248d6-7aed-4269-ad13-efd75ab5499b","Type":"ContainerStarted","Data":"7d60996796e71bd913a9f7b2cc1ed14f7d4ee31c36eb57e7ff983e784fa6051d"} Nov 24 09:25:18 crc kubenswrapper[4691]: I1124 09:25:18.995801 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/must-gather-fx258" event={"ID":"295248d6-7aed-4269-ad13-efd75ab5499b","Type":"ContainerStarted","Data":"c84d4b4f7efe496e576f82873bd7dbf5a08563001c970cd00b5ae8de1df0992a"} Nov 24 09:25:19 crc kubenswrapper[4691]: I1124 09:25:19.033724 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-qczfr/must-gather-fx258" podStartSLOduration=2.427730226 podStartE2EDuration="11.033698548s" podCreationTimestamp="2025-11-24 09:25:08 +0000 UTC" firstStartedPulling="2025-11-24 09:25:09.161206411 +0000 UTC m=+5271.160155660" lastFinishedPulling="2025-11-24 09:25:17.767174713 +0000 UTC m=+5279.766123982" observedRunningTime="2025-11-24 09:25:19.019776352 +0000 UTC m=+5281.018725611" watchObservedRunningTime="2025-11-24 09:25:19.033698548 +0000 UTC m=+5281.032647797" Nov 24 09:25:21 crc kubenswrapper[4691]: I1124 09:25:21.671022 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qczfr/crc-debug-kvqk5"] Nov 24 09:25:21 crc kubenswrapper[4691]: I1124 09:25:21.672716 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-kvqk5" Nov 24 09:25:21 crc kubenswrapper[4691]: I1124 09:25:21.674990 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-qczfr"/"default-dockercfg-bt98g" Nov 24 09:25:21 crc kubenswrapper[4691]: I1124 09:25:21.765916 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/47076f34-49b0-4780-ae6f-1769de3a066f-host\") pod \"crc-debug-kvqk5\" (UID: \"47076f34-49b0-4780-ae6f-1769de3a066f\") " pod="openshift-must-gather-qczfr/crc-debug-kvqk5" Nov 24 09:25:21 crc kubenswrapper[4691]: I1124 09:25:21.766242 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9jpt\" (UniqueName: \"kubernetes.io/projected/47076f34-49b0-4780-ae6f-1769de3a066f-kube-api-access-n9jpt\") pod \"crc-debug-kvqk5\" (UID: \"47076f34-49b0-4780-ae6f-1769de3a066f\") " pod="openshift-must-gather-qczfr/crc-debug-kvqk5" Nov 24 09:25:21 crc kubenswrapper[4691]: I1124 09:25:21.867800 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/47076f34-49b0-4780-ae6f-1769de3a066f-host\") pod \"crc-debug-kvqk5\" (UID: \"47076f34-49b0-4780-ae6f-1769de3a066f\") " pod="openshift-must-gather-qczfr/crc-debug-kvqk5" Nov 24 09:25:21 crc kubenswrapper[4691]: I1124 09:25:21.867868 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9jpt\" (UniqueName: \"kubernetes.io/projected/47076f34-49b0-4780-ae6f-1769de3a066f-kube-api-access-n9jpt\") pod \"crc-debug-kvqk5\" (UID: \"47076f34-49b0-4780-ae6f-1769de3a066f\") " pod="openshift-must-gather-qczfr/crc-debug-kvqk5" Nov 24 09:25:21 crc kubenswrapper[4691]: I1124 09:25:21.868356 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/47076f34-49b0-4780-ae6f-1769de3a066f-host\") pod \"crc-debug-kvqk5\" (UID: \"47076f34-49b0-4780-ae6f-1769de3a066f\") " pod="openshift-must-gather-qczfr/crc-debug-kvqk5" Nov 24 09:25:21 crc kubenswrapper[4691]: I1124 09:25:21.900527 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9jpt\" (UniqueName: \"kubernetes.io/projected/47076f34-49b0-4780-ae6f-1769de3a066f-kube-api-access-n9jpt\") pod \"crc-debug-kvqk5\" (UID: \"47076f34-49b0-4780-ae6f-1769de3a066f\") " pod="openshift-must-gather-qczfr/crc-debug-kvqk5" Nov 24 09:25:22 crc kubenswrapper[4691]: I1124 09:25:22.000188 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-kvqk5" Nov 24 09:25:23 crc kubenswrapper[4691]: I1124 09:25:23.035356 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/crc-debug-kvqk5" event={"ID":"47076f34-49b0-4780-ae6f-1769de3a066f","Type":"ContainerStarted","Data":"65122c9ebbef346b972199040ff256b6c341dffcf7c211db67a7f9ca7e503ba7"} Nov 24 09:25:23 crc kubenswrapper[4691]: I1124 09:25:23.761517 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:25:23 crc kubenswrapper[4691]: E1124 09:25:23.761796 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:25:36 crc kubenswrapper[4691]: I1124 09:25:36.172246 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/crc-debug-kvqk5" event={"ID":"47076f34-49b0-4780-ae6f-1769de3a066f","Type":"ContainerStarted","Data":"ea006c30efd3fd32e3f0daa6be8084538a4399a5de2b8c4cd11a10912fc8632b"} Nov 24 09:25:36 crc kubenswrapper[4691]: I1124 09:25:36.190002 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-qczfr/crc-debug-kvqk5" podStartSLOduration=2.022883082 podStartE2EDuration="15.189975539s" podCreationTimestamp="2025-11-24 09:25:21 +0000 UTC" firstStartedPulling="2025-11-24 09:25:22.046951504 +0000 UTC m=+5284.045900753" lastFinishedPulling="2025-11-24 09:25:35.214043961 +0000 UTC m=+5297.212993210" observedRunningTime="2025-11-24 09:25:36.185891163 +0000 UTC m=+5298.184840422" watchObservedRunningTime="2025-11-24 09:25:36.189975539 +0000 UTC m=+5298.188924788" Nov 24 09:25:37 crc kubenswrapper[4691]: I1124 09:25:37.760336 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:25:37 crc kubenswrapper[4691]: E1124 09:25:37.760900 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:25:49 crc kubenswrapper[4691]: I1124 09:25:49.761417 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:25:49 crc kubenswrapper[4691]: E1124 09:25:49.763936 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:26:04 crc kubenswrapper[4691]: I1124 09:26:04.761367 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 
09:26:05 crc kubenswrapper[4691]: I1124 09:26:05.438685 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"66a56f3fd5650c8a5dc80218e98268dc9ad3afeff230b247f513bb7c5ff0fe3d"} Nov 24 09:26:06 crc kubenswrapper[4691]: I1124 09:26:06.387910 4691 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-6bf54cf5bc-7wgwz" podUID="9a9213e2-4a1f-4d15-ab02-472c467babfe" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 24 09:26:30 crc kubenswrapper[4691]: I1124 09:26:30.679195 4691 generic.go:334] "Generic (PLEG): container finished" podID="47076f34-49b0-4780-ae6f-1769de3a066f" containerID="ea006c30efd3fd32e3f0daa6be8084538a4399a5de2b8c4cd11a10912fc8632b" exitCode=0 Nov 24 09:26:30 crc kubenswrapper[4691]: I1124 09:26:30.679381 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/crc-debug-kvqk5" event={"ID":"47076f34-49b0-4780-ae6f-1769de3a066f","Type":"ContainerDied","Data":"ea006c30efd3fd32e3f0daa6be8084538a4399a5de2b8c4cd11a10912fc8632b"} Nov 24 09:26:31 crc kubenswrapper[4691]: I1124 09:26:31.790197 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-kvqk5" Nov 24 09:26:31 crc kubenswrapper[4691]: I1124 09:26:31.828287 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qczfr/crc-debug-kvqk5"] Nov 24 09:26:31 crc kubenswrapper[4691]: I1124 09:26:31.835155 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qczfr/crc-debug-kvqk5"] Nov 24 09:26:31 crc kubenswrapper[4691]: I1124 09:26:31.910404 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9jpt\" (UniqueName: \"kubernetes.io/projected/47076f34-49b0-4780-ae6f-1769de3a066f-kube-api-access-n9jpt\") pod \"47076f34-49b0-4780-ae6f-1769de3a066f\" (UID: \"47076f34-49b0-4780-ae6f-1769de3a066f\") " Nov 24 09:26:31 crc kubenswrapper[4691]: I1124 09:26:31.910712 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/47076f34-49b0-4780-ae6f-1769de3a066f-host\") pod \"47076f34-49b0-4780-ae6f-1769de3a066f\" (UID: \"47076f34-49b0-4780-ae6f-1769de3a066f\") " Nov 24 09:26:31 crc kubenswrapper[4691]: I1124 09:26:31.911157 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/47076f34-49b0-4780-ae6f-1769de3a066f-host" (OuterVolumeSpecName: "host") pod "47076f34-49b0-4780-ae6f-1769de3a066f" (UID: "47076f34-49b0-4780-ae6f-1769de3a066f"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 09:26:31 crc kubenswrapper[4691]: I1124 09:26:31.917065 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47076f34-49b0-4780-ae6f-1769de3a066f-kube-api-access-n9jpt" (OuterVolumeSpecName: "kube-api-access-n9jpt") pod "47076f34-49b0-4780-ae6f-1769de3a066f" (UID: "47076f34-49b0-4780-ae6f-1769de3a066f"). InnerVolumeSpecName "kube-api-access-n9jpt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:26:32 crc kubenswrapper[4691]: I1124 09:26:32.013214 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9jpt\" (UniqueName: \"kubernetes.io/projected/47076f34-49b0-4780-ae6f-1769de3a066f-kube-api-access-n9jpt\") on node \"crc\" DevicePath \"\"" Nov 24 09:26:32 crc kubenswrapper[4691]: I1124 09:26:32.013251 4691 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/47076f34-49b0-4780-ae6f-1769de3a066f-host\") on node \"crc\" DevicePath \"\"" Nov 24 09:26:32 crc kubenswrapper[4691]: I1124 09:26:32.699227 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65122c9ebbef346b972199040ff256b6c341dffcf7c211db67a7f9ca7e503ba7" Nov 24 09:26:32 crc kubenswrapper[4691]: I1124 09:26:32.699298 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-kvqk5" Nov 24 09:26:32 crc kubenswrapper[4691]: I1124 09:26:32.772917 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47076f34-49b0-4780-ae6f-1769de3a066f" path="/var/lib/kubelet/pods/47076f34-49b0-4780-ae6f-1769de3a066f/volumes" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.011137 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qczfr/crc-debug-28qdw"] Nov 24 09:26:33 crc kubenswrapper[4691]: E1124 09:26:33.012608 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47076f34-49b0-4780-ae6f-1769de3a066f" containerName="container-00" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.012711 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="47076f34-49b0-4780-ae6f-1769de3a066f" containerName="container-00" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.013293 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="47076f34-49b0-4780-ae6f-1769de3a066f" containerName="container-00" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.014160 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-28qdw" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.016962 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-qczfr"/"default-dockercfg-bt98g" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.134860 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-host\") pod \"crc-debug-28qdw\" (UID: \"43902ec5-5b5d-4bc3-af67-c4a2c149b52b\") " pod="openshift-must-gather-qczfr/crc-debug-28qdw" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.134921 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hw58h\" (UniqueName: \"kubernetes.io/projected/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-kube-api-access-hw58h\") pod \"crc-debug-28qdw\" (UID: \"43902ec5-5b5d-4bc3-af67-c4a2c149b52b\") " pod="openshift-must-gather-qczfr/crc-debug-28qdw" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.237118 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-host\") pod \"crc-debug-28qdw\" (UID: \"43902ec5-5b5d-4bc3-af67-c4a2c149b52b\") " pod="openshift-must-gather-qczfr/crc-debug-28qdw" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.237194 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hw58h\" (UniqueName: \"kubernetes.io/projected/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-kube-api-access-hw58h\") pod \"crc-debug-28qdw\" (UID: \"43902ec5-5b5d-4bc3-af67-c4a2c149b52b\") " pod="openshift-must-gather-qczfr/crc-debug-28qdw" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.237284 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-host\") pod \"crc-debug-28qdw\" (UID: \"43902ec5-5b5d-4bc3-af67-c4a2c149b52b\") " pod="openshift-must-gather-qczfr/crc-debug-28qdw" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.266189 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hw58h\" (UniqueName: \"kubernetes.io/projected/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-kube-api-access-hw58h\") pod \"crc-debug-28qdw\" (UID: \"43902ec5-5b5d-4bc3-af67-c4a2c149b52b\") " pod="openshift-must-gather-qczfr/crc-debug-28qdw" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.333335 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-28qdw" Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.709994 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/crc-debug-28qdw" event={"ID":"43902ec5-5b5d-4bc3-af67-c4a2c149b52b","Type":"ContainerStarted","Data":"bf939a6d911744cd8336bb8f8580c290a95ba17ff23bb9397640b7ea9af88811"} Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.710566 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/crc-debug-28qdw" event={"ID":"43902ec5-5b5d-4bc3-af67-c4a2c149b52b","Type":"ContainerStarted","Data":"109281757e9b3a09c740227f3cb2d7fe6d1b008b69960a4a32b7fd72d7239a1a"} Nov 24 09:26:33 crc kubenswrapper[4691]: I1124 09:26:33.729785 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-qczfr/crc-debug-28qdw" podStartSLOduration=1.7297689699999999 podStartE2EDuration="1.72976897s" podCreationTimestamp="2025-11-24 09:26:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:26:33.728157765 +0000 UTC m=+5355.727107054" watchObservedRunningTime="2025-11-24 09:26:33.72976897 +0000 UTC m=+5355.728718219" Nov 24 09:26:34 crc kubenswrapper[4691]: I1124 09:26:34.722915 4691 generic.go:334] "Generic (PLEG): container finished" podID="43902ec5-5b5d-4bc3-af67-c4a2c149b52b" containerID="bf939a6d911744cd8336bb8f8580c290a95ba17ff23bb9397640b7ea9af88811" exitCode=0 Nov 24 09:26:34 crc kubenswrapper[4691]: I1124 09:26:34.722983 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/crc-debug-28qdw" event={"ID":"43902ec5-5b5d-4bc3-af67-c4a2c149b52b","Type":"ContainerDied","Data":"bf939a6d911744cd8336bb8f8580c290a95ba17ff23bb9397640b7ea9af88811"} Nov 24 09:26:35 crc kubenswrapper[4691]: I1124 09:26:35.879423 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-28qdw" Nov 24 09:26:35 crc kubenswrapper[4691]: I1124 09:26:35.985814 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hw58h\" (UniqueName: \"kubernetes.io/projected/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-kube-api-access-hw58h\") pod \"43902ec5-5b5d-4bc3-af67-c4a2c149b52b\" (UID: \"43902ec5-5b5d-4bc3-af67-c4a2c149b52b\") " Nov 24 09:26:35 crc kubenswrapper[4691]: I1124 09:26:35.986231 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-host\") pod \"43902ec5-5b5d-4bc3-af67-c4a2c149b52b\" (UID: \"43902ec5-5b5d-4bc3-af67-c4a2c149b52b\") " Nov 24 09:26:35 crc kubenswrapper[4691]: I1124 09:26:35.986468 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-host" (OuterVolumeSpecName: "host") pod "43902ec5-5b5d-4bc3-af67-c4a2c149b52b" (UID: "43902ec5-5b5d-4bc3-af67-c4a2c149b52b"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 09:26:35 crc kubenswrapper[4691]: I1124 09:26:35.986789 4691 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-host\") on node \"crc\" DevicePath \"\"" Nov 24 09:26:35 crc kubenswrapper[4691]: I1124 09:26:35.998976 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-kube-api-access-hw58h" (OuterVolumeSpecName: "kube-api-access-hw58h") pod "43902ec5-5b5d-4bc3-af67-c4a2c149b52b" (UID: "43902ec5-5b5d-4bc3-af67-c4a2c149b52b"). InnerVolumeSpecName "kube-api-access-hw58h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:26:36 crc kubenswrapper[4691]: I1124 09:26:36.089037 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hw58h\" (UniqueName: \"kubernetes.io/projected/43902ec5-5b5d-4bc3-af67-c4a2c149b52b-kube-api-access-hw58h\") on node \"crc\" DevicePath \"\"" Nov 24 09:26:36 crc kubenswrapper[4691]: I1124 09:26:36.318371 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qczfr/crc-debug-28qdw"] Nov 24 09:26:36 crc kubenswrapper[4691]: I1124 09:26:36.326823 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qczfr/crc-debug-28qdw"] Nov 24 09:26:36 crc kubenswrapper[4691]: I1124 09:26:36.741895 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="109281757e9b3a09c740227f3cb2d7fe6d1b008b69960a4a32b7fd72d7239a1a" Nov 24 09:26:36 crc kubenswrapper[4691]: I1124 09:26:36.741995 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-28qdw" Nov 24 09:26:36 crc kubenswrapper[4691]: I1124 09:26:36.770788 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43902ec5-5b5d-4bc3-af67-c4a2c149b52b" path="/var/lib/kubelet/pods/43902ec5-5b5d-4bc3-af67-c4a2c149b52b/volumes" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.477773 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qczfr/crc-debug-gjxd7"] Nov 24 09:26:37 crc kubenswrapper[4691]: E1124 09:26:37.478721 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43902ec5-5b5d-4bc3-af67-c4a2c149b52b" containerName="container-00" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.478746 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="43902ec5-5b5d-4bc3-af67-c4a2c149b52b" containerName="container-00" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.479113 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="43902ec5-5b5d-4bc3-af67-c4a2c149b52b" containerName="container-00" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.480053 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-gjxd7" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.484654 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-qczfr"/"default-dockercfg-bt98g" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.517842 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8sxx\" (UniqueName: \"kubernetes.io/projected/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-kube-api-access-p8sxx\") pod \"crc-debug-gjxd7\" (UID: \"ef8fbcc2-4c7c-4eea-b083-ff4d27212904\") " pod="openshift-must-gather-qczfr/crc-debug-gjxd7" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.518035 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-host\") pod \"crc-debug-gjxd7\" (UID: \"ef8fbcc2-4c7c-4eea-b083-ff4d27212904\") " pod="openshift-must-gather-qczfr/crc-debug-gjxd7" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.621272 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8sxx\" (UniqueName: \"kubernetes.io/projected/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-kube-api-access-p8sxx\") pod \"crc-debug-gjxd7\" (UID: \"ef8fbcc2-4c7c-4eea-b083-ff4d27212904\") " pod="openshift-must-gather-qczfr/crc-debug-gjxd7" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.621360 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-host\") pod \"crc-debug-gjxd7\" (UID: \"ef8fbcc2-4c7c-4eea-b083-ff4d27212904\") " pod="openshift-must-gather-qczfr/crc-debug-gjxd7" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.621529 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-host\") pod \"crc-debug-gjxd7\" (UID: \"ef8fbcc2-4c7c-4eea-b083-ff4d27212904\") " pod="openshift-must-gather-qczfr/crc-debug-gjxd7" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.646411 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8sxx\" (UniqueName: \"kubernetes.io/projected/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-kube-api-access-p8sxx\") pod \"crc-debug-gjxd7\" (UID: \"ef8fbcc2-4c7c-4eea-b083-ff4d27212904\") " pod="openshift-must-gather-qczfr/crc-debug-gjxd7" Nov 24 09:26:37 crc kubenswrapper[4691]: I1124 09:26:37.798138 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-gjxd7" Nov 24 09:26:38 crc kubenswrapper[4691]: I1124 09:26:38.780834 4691 generic.go:334] "Generic (PLEG): container finished" podID="ef8fbcc2-4c7c-4eea-b083-ff4d27212904" containerID="4aa9452e85a226ebaa7a3d68f2df9c62d9fc776c7829bc4c9fb367a67e6f0ffe" exitCode=0 Nov 24 09:26:38 crc kubenswrapper[4691]: I1124 09:26:38.789202 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/crc-debug-gjxd7" event={"ID":"ef8fbcc2-4c7c-4eea-b083-ff4d27212904","Type":"ContainerDied","Data":"4aa9452e85a226ebaa7a3d68f2df9c62d9fc776c7829bc4c9fb367a67e6f0ffe"} Nov 24 09:26:38 crc kubenswrapper[4691]: I1124 09:26:38.789302 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/crc-debug-gjxd7" event={"ID":"ef8fbcc2-4c7c-4eea-b083-ff4d27212904","Type":"ContainerStarted","Data":"32cc901c27dd92aa6325ac800fb4df18f496b808f371274721ae64f049242f07"} Nov 24 09:26:38 crc kubenswrapper[4691]: I1124 09:26:38.835277 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qczfr/crc-debug-gjxd7"] Nov 24 09:26:38 crc kubenswrapper[4691]: I1124 09:26:38.850259 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qczfr/crc-debug-gjxd7"] Nov 24 09:26:39 crc kubenswrapper[4691]: I1124 09:26:39.909869 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-gjxd7" Nov 24 09:26:39 crc kubenswrapper[4691]: I1124 09:26:39.964071 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8sxx\" (UniqueName: \"kubernetes.io/projected/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-kube-api-access-p8sxx\") pod \"ef8fbcc2-4c7c-4eea-b083-ff4d27212904\" (UID: \"ef8fbcc2-4c7c-4eea-b083-ff4d27212904\") " Nov 24 09:26:39 crc kubenswrapper[4691]: I1124 09:26:39.964492 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-host\") pod \"ef8fbcc2-4c7c-4eea-b083-ff4d27212904\" (UID: \"ef8fbcc2-4c7c-4eea-b083-ff4d27212904\") " Nov 24 09:26:39 crc kubenswrapper[4691]: I1124 09:26:39.965243 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-host" (OuterVolumeSpecName: "host") pod "ef8fbcc2-4c7c-4eea-b083-ff4d27212904" (UID: "ef8fbcc2-4c7c-4eea-b083-ff4d27212904"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 24 09:26:39 crc kubenswrapper[4691]: I1124 09:26:39.970969 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-kube-api-access-p8sxx" (OuterVolumeSpecName: "kube-api-access-p8sxx") pod "ef8fbcc2-4c7c-4eea-b083-ff4d27212904" (UID: "ef8fbcc2-4c7c-4eea-b083-ff4d27212904"). InnerVolumeSpecName "kube-api-access-p8sxx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:26:40 crc kubenswrapper[4691]: I1124 09:26:40.068209 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8sxx\" (UniqueName: \"kubernetes.io/projected/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-kube-api-access-p8sxx\") on node \"crc\" DevicePath \"\"" Nov 24 09:26:40 crc kubenswrapper[4691]: I1124 09:26:40.068252 4691 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ef8fbcc2-4c7c-4eea-b083-ff4d27212904-host\") on node \"crc\" DevicePath \"\"" Nov 24 09:26:40 crc kubenswrapper[4691]: I1124 09:26:40.770739 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef8fbcc2-4c7c-4eea-b083-ff4d27212904" path="/var/lib/kubelet/pods/ef8fbcc2-4c7c-4eea-b083-ff4d27212904/volumes" Nov 24 09:26:40 crc kubenswrapper[4691]: I1124 09:26:40.799931 4691 scope.go:117] "RemoveContainer" containerID="4aa9452e85a226ebaa7a3d68f2df9c62d9fc776c7829bc4c9fb367a67e6f0ffe" Nov 24 09:26:40 crc kubenswrapper[4691]: I1124 09:26:40.800325 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qczfr/crc-debug-gjxd7" Nov 24 09:26:53 crc kubenswrapper[4691]: I1124 09:26:53.552744 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7d7c46cd68-xl465_f904b6e6-711f-4edd-bdaf-1eeca5979318/barbican-api/0.log" Nov 24 09:26:53 crc kubenswrapper[4691]: I1124 09:26:53.588420 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7d7c46cd68-xl465_f904b6e6-711f-4edd-bdaf-1eeca5979318/barbican-api-log/0.log" Nov 24 09:26:53 crc kubenswrapper[4691]: I1124 09:26:53.768490 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6866b57cd6-xcpbl_750147cd-32ed-4f3d-83e5-96798011bf10/barbican-keystone-listener/0.log" Nov 24 09:26:53 crc kubenswrapper[4691]: I1124 09:26:53.863728 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6866b57cd6-xcpbl_750147cd-32ed-4f3d-83e5-96798011bf10/barbican-keystone-listener-log/0.log" Nov 24 09:26:53 crc kubenswrapper[4691]: I1124 09:26:53.917482 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bcd6fbf67-bnwn2_e39f7b55-5583-421f-a817-bae68533b497/barbican-worker/0.log" Nov 24 09:26:53 crc kubenswrapper[4691]: I1124 09:26:53.956768 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bcd6fbf67-bnwn2_e39f7b55-5583-421f-a817-bae68533b497/barbican-worker-log/0.log" Nov 24 09:26:54 crc kubenswrapper[4691]: I1124 09:26:54.085912 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92_6199a668-e1b5-473b-8ff0-2fdf26b69c79/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:26:54 crc kubenswrapper[4691]: I1124 09:26:54.210104 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0e783b59-54e1-401f-a281-b665848b7083/ceilometer-central-agent/0.log" Nov 24 09:26:54 crc kubenswrapper[4691]: I1124 09:26:54.262611 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0e783b59-54e1-401f-a281-b665848b7083/ceilometer-notification-agent/0.log" Nov 24 09:26:54 crc kubenswrapper[4691]: I1124 09:26:54.302328 4691 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ceilometer-0_0e783b59-54e1-401f-a281-b665848b7083/proxy-httpd/0.log" Nov 24 09:26:54 crc kubenswrapper[4691]: I1124 09:26:54.343487 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0e783b59-54e1-401f-a281-b665848b7083/sg-core/0.log" Nov 24 09:26:54 crc kubenswrapper[4691]: I1124 09:26:54.494562 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_e9b3d587-fa7c-4af9-8667-d4ea91483ad9/cinder-api-log/0.log" Nov 24 09:26:54 crc kubenswrapper[4691]: I1124 09:26:54.561935 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_e9b3d587-fa7c-4af9-8667-d4ea91483ad9/cinder-api/0.log" Nov 24 09:26:54 crc kubenswrapper[4691]: I1124 09:26:54.717726 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_68176dd8-7480-4c30-8788-dd915e1568d5/probe/0.log" Nov 24 09:26:54 crc kubenswrapper[4691]: I1124 09:26:54.756597 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_68176dd8-7480-4c30-8788-dd915e1568d5/cinder-scheduler/0.log" Nov 24 09:26:54 crc kubenswrapper[4691]: I1124 09:26:54.832018 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-764zv_7a0ce3be-4dc4-4451-979d-0f8a4372e061/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:26:54 crc kubenswrapper[4691]: I1124 09:26:54.983028 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k_1ac65fef-8c31-48ea-9715-9245e9dd717e/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:26:55 crc kubenswrapper[4691]: I1124 09:26:55.064653 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-gx44c_2f860729-9ea4-4236-9465-68ac2164ac5c/init/0.log" Nov 24 09:26:55 crc kubenswrapper[4691]: I1124 09:26:55.191434 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-gx44c_2f860729-9ea4-4236-9465-68ac2164ac5c/init/0.log" Nov 24 09:26:55 crc kubenswrapper[4691]: I1124 09:26:55.321320 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-svnp5_460ba73d-0917-4b4c-8ca1-141a72e6b3e4/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:26:55 crc kubenswrapper[4691]: I1124 09:26:55.374130 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-gx44c_2f860729-9ea4-4236-9465-68ac2164ac5c/dnsmasq-dns/0.log" Nov 24 09:26:55 crc kubenswrapper[4691]: I1124 09:26:55.462507 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3/glance-httpd/0.log" Nov 24 09:26:55 crc kubenswrapper[4691]: I1124 09:26:55.512993 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3/glance-log/0.log" Nov 24 09:26:55 crc kubenswrapper[4691]: I1124 09:26:55.662262 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_31f53279-5e1f-44f9-a1f5-338600bc0156/glance-httpd/0.log" Nov 24 09:26:55 crc kubenswrapper[4691]: I1124 09:26:55.678592 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_31f53279-5e1f-44f9-a1f5-338600bc0156/glance-log/0.log" Nov 
24 09:26:55 crc kubenswrapper[4691]: I1124 09:26:55.878404 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5fb4677cdd-69rb6_5f7435d6-aa83-41a0-b392-b06d77f53aa2/horizon/0.log" Nov 24 09:26:56 crc kubenswrapper[4691]: I1124 09:26:56.012030 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb_81f9a1f9-0d85-4aff-a92f-93e8b36724ff/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:26:56 crc kubenswrapper[4691]: I1124 09:26:56.228580 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-dphxx_5368c577-e1f7-45bf-9102-4e5422934e63/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:26:56 crc kubenswrapper[4691]: I1124 09:26:56.402759 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5fb4677cdd-69rb6_5f7435d6-aa83-41a0-b392-b06d77f53aa2/horizon-log/0.log" Nov 24 09:26:56 crc kubenswrapper[4691]: I1124 09:26:56.486590 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29399581-wrkt4_de9d27b1-63c1-4cc9-9bd6-9d015c3122cf/keystone-cron/0.log" Nov 24 09:26:56 crc kubenswrapper[4691]: I1124 09:26:56.683431 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_9d9b7a95-3c3a-4254-b63e-214d34969aab/kube-state-metrics/0.log" Nov 24 09:26:56 crc kubenswrapper[4691]: I1124 09:26:56.916310 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm_e9953558-8b56-432e-bde8-c07beaa047c0/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:26:56 crc kubenswrapper[4691]: I1124 09:26:56.954721 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-54fc9d9c65-98hdh_75e3a295-29f6-49d4-91d5-c6bf791eebdd/keystone-api/0.log" Nov 24 09:26:57 crc kubenswrapper[4691]: I1124 09:26:57.479566 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2_42d1ff5d-430e-489b-9015-b8a7ad572893/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:26:57 crc kubenswrapper[4691]: I1124 09:26:57.641070 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-57b84ccfdc-qnsn7_8912bda5-405a-472b-a80f-2140a7bb0ded/neutron-httpd/0.log" Nov 24 09:26:57 crc kubenswrapper[4691]: I1124 09:26:57.746525 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-57b84ccfdc-qnsn7_8912bda5-405a-472b-a80f-2140a7bb0ded/neutron-api/0.log" Nov 24 09:26:58 crc kubenswrapper[4691]: I1124 09:26:58.344373 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_80ba081a-de68-4111-8dd6-ec207b574dee/nova-cell0-conductor-conductor/0.log" Nov 24 09:26:58 crc kubenswrapper[4691]: I1124 09:26:58.407027 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_70312fff-c511-48b1-a398-331d593ca41f/nova-cell1-conductor-conductor/0.log" Nov 24 09:26:58 crc kubenswrapper[4691]: I1124 09:26:58.778562 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_d624020f-236a-4048-acb6-a7db917757f6/nova-cell1-novncproxy-novncproxy/0.log" Nov 24 09:26:59 crc kubenswrapper[4691]: I1124 09:26:59.057934 4691 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-zsxsb_bf4bcfba-eec4-43be-b119-cf8f0bdd7182/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:26:59 crc kubenswrapper[4691]: I1124 09:26:59.542214 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5f883df6-eeae-475d-80e8-ef121d343ae7/nova-api-log/0.log" Nov 24 09:26:59 crc kubenswrapper[4691]: I1124 09:26:59.916755 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_816aeaf6-40c5-4859-b819-bcfb46750549/nova-metadata-log/0.log" Nov 24 09:27:00 crc kubenswrapper[4691]: I1124 09:27:00.413908 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5f883df6-eeae-475d-80e8-ef121d343ae7/nova-api-api/0.log" Nov 24 09:27:00 crc kubenswrapper[4691]: I1124 09:27:00.432552 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_5021ba85-77e5-4fc8-8816-5ad1587b82e5/mysql-bootstrap/0.log" Nov 24 09:27:00 crc kubenswrapper[4691]: I1124 09:27:00.556831 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_94098659-df1b-4792-b466-9e7a95bf19e2/nova-scheduler-scheduler/0.log" Nov 24 09:27:00 crc kubenswrapper[4691]: I1124 09:27:00.638903 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_5021ba85-77e5-4fc8-8816-5ad1587b82e5/galera/0.log" Nov 24 09:27:00 crc kubenswrapper[4691]: I1124 09:27:00.639756 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_5021ba85-77e5-4fc8-8816-5ad1587b82e5/mysql-bootstrap/0.log" Nov 24 09:27:00 crc kubenswrapper[4691]: I1124 09:27:00.848246 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_be26bfeb-e0f8-4c67-8938-55d8399b717c/mysql-bootstrap/0.log" Nov 24 09:27:01 crc kubenswrapper[4691]: I1124 09:27:01.075040 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_be26bfeb-e0f8-4c67-8938-55d8399b717c/mysql-bootstrap/0.log" Nov 24 09:27:01 crc kubenswrapper[4691]: I1124 09:27:01.124277 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_be26bfeb-e0f8-4c67-8938-55d8399b717c/galera/0.log" Nov 24 09:27:01 crc kubenswrapper[4691]: I1124 09:27:01.294914 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_86907013-52ae-4aeb-a697-6066cfdbebde/openstackclient/0.log" Nov 24 09:27:01 crc kubenswrapper[4691]: I1124 09:27:01.320780 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-jknmq_204a8833-cf7b-491a-b06a-0c983a6aa30a/ovn-controller/0.log" Nov 24 09:27:01 crc kubenswrapper[4691]: I1124 09:27:01.483674 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-hs48v_16f9ca32-c0b3-4269-af05-a68a6d21269b/openstack-network-exporter/0.log" Nov 24 09:27:01 crc kubenswrapper[4691]: I1124 09:27:01.674165 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pkx2n_8f3c496c-e0d1-4b16-80e9-fd3c10dacf79/ovsdb-server-init/0.log" Nov 24 09:27:01 crc kubenswrapper[4691]: I1124 09:27:01.866305 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pkx2n_8f3c496c-e0d1-4b16-80e9-fd3c10dacf79/ovs-vswitchd/0.log" Nov 24 09:27:01 crc kubenswrapper[4691]: I1124 09:27:01.893231 4691 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-ovs-pkx2n_8f3c496c-e0d1-4b16-80e9-fd3c10dacf79/ovsdb-server-init/0.log" Nov 24 09:27:01 crc kubenswrapper[4691]: I1124 09:27:01.912628 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pkx2n_8f3c496c-e0d1-4b16-80e9-fd3c10dacf79/ovsdb-server/0.log" Nov 24 09:27:02 crc kubenswrapper[4691]: I1124 09:27:02.124605 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-zqkwb_7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:27:02 crc kubenswrapper[4691]: I1124 09:27:02.311455 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4897f50d-627f-434b-a0d8-84854f219509/openstack-network-exporter/0.log" Nov 24 09:27:02 crc kubenswrapper[4691]: I1124 09:27:02.326353 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4897f50d-627f-434b-a0d8-84854f219509/ovn-northd/0.log" Nov 24 09:27:02 crc kubenswrapper[4691]: I1124 09:27:02.540263 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_7d275bbe-d927-40c6-83b6-ad6da7f2a83c/openstack-network-exporter/0.log" Nov 24 09:27:02 crc kubenswrapper[4691]: I1124 09:27:02.559583 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_7d275bbe-d927-40c6-83b6-ad6da7f2a83c/ovsdbserver-nb/0.log" Nov 24 09:27:02 crc kubenswrapper[4691]: I1124 09:27:02.736854 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_57b5f932-160d-453a-ad0b-2b111085fda8/openstack-network-exporter/0.log" Nov 24 09:27:02 crc kubenswrapper[4691]: I1124 09:27:02.762848 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_57b5f932-160d-453a-ad0b-2b111085fda8/ovsdbserver-sb/0.log" Nov 24 09:27:02 crc kubenswrapper[4691]: I1124 09:27:02.856644 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_816aeaf6-40c5-4859-b819-bcfb46750549/nova-metadata-metadata/0.log" Nov 24 09:27:03 crc kubenswrapper[4691]: I1124 09:27:03.145203 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_4e65164c-c11a-4774-808c-f0dbdf7f9ffa/setup-container/0.log" Nov 24 09:27:03 crc kubenswrapper[4691]: I1124 09:27:03.214691 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-77fc7f8568-9mx5z_acaed1f5-7a77-46a1-936d-e0fa2a02767b/placement-api/0.log" Nov 24 09:27:03 crc kubenswrapper[4691]: I1124 09:27:03.329934 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-77fc7f8568-9mx5z_acaed1f5-7a77-46a1-936d-e0fa2a02767b/placement-log/0.log" Nov 24 09:27:03 crc kubenswrapper[4691]: I1124 09:27:03.346108 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_4e65164c-c11a-4774-808c-f0dbdf7f9ffa/setup-container/0.log" Nov 24 09:27:03 crc kubenswrapper[4691]: I1124 09:27:03.408859 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_4e65164c-c11a-4774-808c-f0dbdf7f9ffa/rabbitmq/0.log" Nov 24 09:27:03 crc kubenswrapper[4691]: I1124 09:27:03.561022 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_19b40ace-19bb-41b3-8b25-f93691331766/setup-container/0.log" Nov 24 09:27:03 crc kubenswrapper[4691]: I1124 09:27:03.738320 4691 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-server-0_19b40ace-19bb-41b3-8b25-f93691331766/rabbitmq/0.log" Nov 24 09:27:03 crc kubenswrapper[4691]: I1124 09:27:03.767319 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_19b40ace-19bb-41b3-8b25-f93691331766/setup-container/0.log" Nov 24 09:27:03 crc kubenswrapper[4691]: I1124 09:27:03.846777 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d_d9a70a19-1e34-4bf7-8b91-ed6df2838313/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:27:03 crc kubenswrapper[4691]: I1124 09:27:03.990960 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-ncxlx_4e07a8ba-4deb-45cb-8ecd-423300eadb7a/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:27:04 crc kubenswrapper[4691]: I1124 09:27:04.070231 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd_7b0cd66f-4531-45fd-aea8-00726f118662/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:27:04 crc kubenswrapper[4691]: I1124 09:27:04.216679 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-fgbtw_c306f14b-da97-42e1-87cc-612779e690e7/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:27:04 crc kubenswrapper[4691]: I1124 09:27:04.308720 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-6tlcl_c5c8e953-d111-42cd-8930-ee2c8f4242dd/ssh-known-hosts-edpm-deployment/0.log" Nov 24 09:27:04 crc kubenswrapper[4691]: I1124 09:27:04.596621 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6bf54cf5bc-7wgwz_9a9213e2-4a1f-4d15-ab02-472c467babfe/proxy-server/0.log" Nov 24 09:27:04 crc kubenswrapper[4691]: I1124 09:27:04.668201 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6bf54cf5bc-7wgwz_9a9213e2-4a1f-4d15-ab02-472c467babfe/proxy-httpd/0.log" Nov 24 09:27:04 crc kubenswrapper[4691]: I1124 09:27:04.725714 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-tzjh7_5c00da32-542e-45b4-837c-67fa08ff49d3/swift-ring-rebalance/0.log" Nov 24 09:27:04 crc kubenswrapper[4691]: I1124 09:27:04.864792 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/account-auditor/0.log" Nov 24 09:27:04 crc kubenswrapper[4691]: I1124 09:27:04.916469 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/account-reaper/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.014423 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/account-replicator/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.138027 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/account-server/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.181147 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/container-auditor/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.202574 4691 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/container-replicator/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.237716 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/container-server/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.329013 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/container-updater/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.404208 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/object-expirer/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.459507 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/object-auditor/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.470602 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/object-replicator/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.603252 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/object-updater/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.608409 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/object-server/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.676209 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/rsync/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.717285 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/swift-recon-cron/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.870093 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp_b19b3af1-e299-46ab-b579-902390cb75a3/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:27:05 crc kubenswrapper[4691]: I1124 09:27:05.961753 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4_fb487f8d-8df8-4b2d-9b08-647a942d8559/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 24 09:27:07 crc kubenswrapper[4691]: I1124 09:27:07.753569 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4/memcached/0.log" Nov 24 09:27:26 crc kubenswrapper[4691]: I1124 09:27:26.300851 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/util/0.log" Nov 24 09:27:26 crc kubenswrapper[4691]: I1124 09:27:26.564622 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/util/0.log" Nov 24 09:27:26 crc kubenswrapper[4691]: I1124 09:27:26.565323 4691 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/pull/0.log" Nov 24 09:27:26 crc kubenswrapper[4691]: I1124 09:27:26.574570 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/pull/0.log" Nov 24 09:27:26 crc kubenswrapper[4691]: I1124 09:27:26.690008 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/util/0.log" Nov 24 09:27:26 crc kubenswrapper[4691]: I1124 09:27:26.753517 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/pull/0.log" Nov 24 09:27:26 crc kubenswrapper[4691]: I1124 09:27:26.757504 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/extract/0.log" Nov 24 09:27:26 crc kubenswrapper[4691]: I1124 09:27:26.868662 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-ppdhs_22fec998-136d-4bc0-9db1-1e4ac6e1107c/kube-rbac-proxy/0.log" Nov 24 09:27:26 crc kubenswrapper[4691]: I1124 09:27:26.935707 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-6jgx4_132ed997-05f1-4484-a11a-3e282b0e889b/kube-rbac-proxy/0.log" Nov 24 09:27:27 crc kubenswrapper[4691]: I1124 09:27:27.018275 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-ppdhs_22fec998-136d-4bc0-9db1-1e4ac6e1107c/manager/0.log" Nov 24 09:27:27 crc kubenswrapper[4691]: I1124 09:27:27.107710 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-6jgx4_132ed997-05f1-4484-a11a-3e282b0e889b/manager/0.log" Nov 24 09:27:27 crc kubenswrapper[4691]: I1124 09:27:27.210754 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-ncq2x_24f62db2-c526-493e-a703-43a661ea0228/manager/0.log" Nov 24 09:27:27 crc kubenswrapper[4691]: I1124 09:27:27.245384 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-ncq2x_24f62db2-c526-493e-a703-43a661ea0228/kube-rbac-proxy/0.log" Nov 24 09:27:27 crc kubenswrapper[4691]: I1124 09:27:27.362388 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-f7g9v_bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e/kube-rbac-proxy/0.log" Nov 24 09:27:27 crc kubenswrapper[4691]: I1124 09:27:27.458059 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-cql69_df3746c8-ec8b-406e-b2f5-7bd93dd46646/kube-rbac-proxy/0.log" Nov 24 09:27:27 crc kubenswrapper[4691]: I1124 09:27:27.541327 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-f7g9v_bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e/manager/0.log" Nov 24 09:27:27 crc kubenswrapper[4691]: I1124 
09:27:27.596634 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-cql69_df3746c8-ec8b-406e-b2f5-7bd93dd46646/manager/0.log" Nov 24 09:27:27 crc kubenswrapper[4691]: I1124 09:27:27.639073 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-jctfk_39df322c-3527-4b0d-a719-4ecbfa944a56/kube-rbac-proxy/0.log" Nov 24 09:27:27 crc kubenswrapper[4691]: I1124 09:27:27.812392 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-jctfk_39df322c-3527-4b0d-a719-4ecbfa944a56/manager/0.log" Nov 24 09:27:27 crc kubenswrapper[4691]: I1124 09:27:27.830805 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-v7vtk_7e82629b-ee44-488b-bdd3-58f078070f7e/kube-rbac-proxy/0.log" Nov 24 09:27:28 crc kubenswrapper[4691]: I1124 09:27:28.008269 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-v7vtk_7e82629b-ee44-488b-bdd3-58f078070f7e/manager/0.log" Nov 24 09:27:28 crc kubenswrapper[4691]: I1124 09:27:28.060883 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-nfx6g_f8a9119f-fc7e-4bb6-89da-91f7655c633d/manager/0.log" Nov 24 09:27:28 crc kubenswrapper[4691]: I1124 09:27:28.063109 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-nfx6g_f8a9119f-fc7e-4bb6-89da-91f7655c633d/kube-rbac-proxy/0.log" Nov 24 09:27:28 crc kubenswrapper[4691]: I1124 09:27:28.258588 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-2w275_be284da4-49c2-4967-a810-eb5dbece93a3/kube-rbac-proxy/0.log" Nov 24 09:27:28 crc kubenswrapper[4691]: I1124 09:27:28.331330 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-2w275_be284da4-49c2-4967-a810-eb5dbece93a3/manager/0.log" Nov 24 09:27:28 crc kubenswrapper[4691]: I1124 09:27:28.418045 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-clqqr_c2acb14d-547e-4528-addc-5bb388370b04/kube-rbac-proxy/0.log" Nov 24 09:27:28 crc kubenswrapper[4691]: I1124 09:27:28.495012 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-clqqr_c2acb14d-547e-4528-addc-5bb388370b04/manager/0.log" Nov 24 09:27:28 crc kubenswrapper[4691]: I1124 09:27:28.587373 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-vnlb4_f4138dbf-cfaf-4a82-bf69-d6065584d1ba/kube-rbac-proxy/0.log" Nov 24 09:27:28 crc kubenswrapper[4691]: I1124 09:27:28.630634 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-vnlb4_f4138dbf-cfaf-4a82-bf69-d6065584d1ba/manager/0.log" Nov 24 09:27:28 crc kubenswrapper[4691]: I1124 09:27:28.754207 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-8tqbw_66685e8a-e196-444b-9149-e7861ff2c8b5/kube-rbac-proxy/0.log" Nov 24 09:27:28 crc 
kubenswrapper[4691]: I1124 09:27:28.932067 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-8tqbw_66685e8a-e196-444b-9149-e7861ff2c8b5/manager/0.log" Nov 24 09:27:28 crc kubenswrapper[4691]: I1124 09:27:28.948707 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-bh7th_0eb9999f-a946-4946-83e0-6cbf7be82741/kube-rbac-proxy/0.log" Nov 24 09:27:29 crc kubenswrapper[4691]: I1124 09:27:29.068516 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-bh7th_0eb9999f-a946-4946-83e0-6cbf7be82741/manager/0.log" Nov 24 09:27:29 crc kubenswrapper[4691]: I1124 09:27:29.164830 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-sr8nk_1c460dd6-5f3d-4eae-9436-c46ccd900674/manager/0.log" Nov 24 09:27:29 crc kubenswrapper[4691]: I1124 09:27:29.220029 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-sr8nk_1c460dd6-5f3d-4eae-9436-c46ccd900674/kube-rbac-proxy/0.log" Nov 24 09:27:29 crc kubenswrapper[4691]: I1124 09:27:29.316918 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d_b9f37eec-f8fc-4083-b29a-4e704c802c8a/kube-rbac-proxy/0.log" Nov 24 09:27:29 crc kubenswrapper[4691]: I1124 09:27:29.358353 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d_b9f37eec-f8fc-4083-b29a-4e704c802c8a/manager/0.log" Nov 24 09:27:29 crc kubenswrapper[4691]: I1124 09:27:29.767915 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-4ppc7_c57ebb8d-e8cb-4e4a-af63-e79986c327a5/registry-server/0.log" Nov 24 09:27:29 crc kubenswrapper[4691]: I1124 09:27:29.768092 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-54cb99d74c-jrmkn_2aa0febc-e96d-419c-855c-bae0db1c6d11/operator/0.log" Nov 24 09:27:29 crc kubenswrapper[4691]: I1124 09:27:29.955110 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-554b4f8994-dck8w_f46c7222-cbb0-457d-bb11-15d8cb855c8b/kube-rbac-proxy/0.log" Nov 24 09:27:30 crc kubenswrapper[4691]: I1124 09:27:30.106964 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-554b4f8994-dck8w_f46c7222-cbb0-457d-bb11-15d8cb855c8b/manager/0.log" Nov 24 09:27:30 crc kubenswrapper[4691]: I1124 09:27:30.224290 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-kp2bb_63c87b6f-c210-4837-bde9-87436a88578f/kube-rbac-proxy/0.log" Nov 24 09:27:30 crc kubenswrapper[4691]: I1124 09:27:30.388457 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-65lbw_f3bb505d-02c4-49ec-94c5-a349cb5a4468/operator/0.log" Nov 24 09:27:30 crc kubenswrapper[4691]: I1124 09:27:30.408313 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-kp2bb_63c87b6f-c210-4837-bde9-87436a88578f/manager/0.log" Nov 24 
09:27:30 crc kubenswrapper[4691]: I1124 09:27:30.560691 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-wmpvm_c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7/kube-rbac-proxy/0.log" Nov 24 09:27:30 crc kubenswrapper[4691]: I1124 09:27:30.711797 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-wmpvm_c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7/manager/0.log" Nov 24 09:27:30 crc kubenswrapper[4691]: I1124 09:27:30.743078 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-sn2x6_0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2/kube-rbac-proxy/0.log" Nov 24 09:27:30 crc kubenswrapper[4691]: I1124 09:27:30.806906 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-sn2x6_0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2/manager/0.log" Nov 24 09:27:30 crc kubenswrapper[4691]: I1124 09:27:30.887971 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7888ffcffd-8jst5_603e76a3-8258-43ec-850b-d2c34845cd8b/manager/0.log" Nov 24 09:27:30 crc kubenswrapper[4691]: I1124 09:27:30.932739 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-4czcs_ccc21638-592f-4e4f-87df-f95f79a5c23e/manager/0.log" Nov 24 09:27:30 crc kubenswrapper[4691]: I1124 09:27:30.970317 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-4czcs_ccc21638-592f-4e4f-87df-f95f79a5c23e/kube-rbac-proxy/0.log" Nov 24 09:27:31 crc kubenswrapper[4691]: I1124 09:27:31.055050 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-8qh9m_345576fd-a4cd-4c76-8c81-3669a42be294/kube-rbac-proxy/0.log" Nov 24 09:27:31 crc kubenswrapper[4691]: I1124 09:27:31.100812 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-8qh9m_345576fd-a4cd-4c76-8c81-3669a42be294/manager/0.log" Nov 24 09:27:47 crc kubenswrapper[4691]: I1124 09:27:47.345308 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-4npq9_d7748b2b-46c9-4709-bb46-545d8209bb5f/control-plane-machine-set-operator/0.log" Nov 24 09:27:47 crc kubenswrapper[4691]: I1124 09:27:47.506622 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-m8bj7_12724cb5-e0ed-4c92-93e6-0f223dd11bea/kube-rbac-proxy/0.log" Nov 24 09:27:47 crc kubenswrapper[4691]: I1124 09:27:47.536902 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-m8bj7_12724cb5-e0ed-4c92-93e6-0f223dd11bea/machine-api-operator/0.log" Nov 24 09:27:58 crc kubenswrapper[4691]: I1124 09:27:58.649173 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-58b88_345048fa-fc45-40c3-bd90-e517c3594a2a/cert-manager-controller/0.log" Nov 24 09:27:58 crc kubenswrapper[4691]: I1124 09:27:58.801040 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-5rltx_78886f3b-0708-4e26-bc7d-ade51d1b3e9c/cert-manager-cainjector/0.log" Nov 
24 09:27:58 crc kubenswrapper[4691]: I1124 09:27:58.852019 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-lmkrt_40cf1922-077a-482f-9ffa-7dd636da29ef/cert-manager-webhook/0.log" Nov 24 09:28:10 crc kubenswrapper[4691]: I1124 09:28:10.181348 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-nrlp6_8a7fc372-f01f-497b-b1bd-c508371d6069/nmstate-console-plugin/0.log" Nov 24 09:28:10 crc kubenswrapper[4691]: I1124 09:28:10.354599 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-sk5rc_17c32358-060b-4f32-abec-0eac2e40eca1/nmstate-handler/0.log" Nov 24 09:28:10 crc kubenswrapper[4691]: I1124 09:28:10.418559 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fjpwj_6fc26c17-4027-42aa-821e-b3e5c1f92226/nmstate-metrics/0.log" Nov 24 09:28:10 crc kubenswrapper[4691]: I1124 09:28:10.455074 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fjpwj_6fc26c17-4027-42aa-821e-b3e5c1f92226/kube-rbac-proxy/0.log" Nov 24 09:28:10 crc kubenswrapper[4691]: I1124 09:28:10.606612 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-5fxr5_cd1dce5a-f168-4208-879b-f4132bf30307/nmstate-operator/0.log" Nov 24 09:28:10 crc kubenswrapper[4691]: I1124 09:28:10.667164 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-xq22b_d877fe85-0260-4e8f-89c9-ad96a8466bee/nmstate-webhook/0.log" Nov 24 09:28:21 crc kubenswrapper[4691]: I1124 09:28:21.088911 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:28:21 crc kubenswrapper[4691]: I1124 09:28:21.089588 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:28:23 crc kubenswrapper[4691]: I1124 09:28:23.766125 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-wgxjz_8a2bb6bf-c15d-40aa-9af4-b4c55f67acff/kube-rbac-proxy/0.log" Nov 24 09:28:23 crc kubenswrapper[4691]: I1124 09:28:23.832982 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-wgxjz_8a2bb6bf-c15d-40aa-9af4-b4c55f67acff/controller/0.log" Nov 24 09:28:23 crc kubenswrapper[4691]: I1124 09:28:23.938326 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-frr-files/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.163925 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-frr-files/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.163979 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-metrics/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: 
I1124 09:28:24.177639 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-reloader/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.225150 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-reloader/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.407884 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-reloader/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.410738 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-metrics/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.411386 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-metrics/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.417869 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-frr-files/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.667619 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-frr-files/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.671210 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-metrics/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.706659 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-reloader/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.723976 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/controller/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.833255 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/frr-metrics/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.945234 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/kube-rbac-proxy/0.log" Nov 24 09:28:24 crc kubenswrapper[4691]: I1124 09:28:24.975595 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/kube-rbac-proxy-frr/0.log" Nov 24 09:28:25 crc kubenswrapper[4691]: I1124 09:28:25.079519 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/reloader/0.log" Nov 24 09:28:25 crc kubenswrapper[4691]: I1124 09:28:25.158170 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-bbnct_e740c0fa-a972-42fe-8e95-aaed01b46916/frr-k8s-webhook-server/0.log" Nov 24 09:28:25 crc kubenswrapper[4691]: I1124 09:28:25.371837 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-86b4f5566f-xknt4_5c353a67-2f3f-4608-a19e-406c31bae85a/manager/0.log" Nov 24 09:28:25 crc kubenswrapper[4691]: I1124 09:28:25.608303 4691 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-webhook-server-577d7cd9f7-t5x6j_42e2daa4-034f-4fe6-852e-479d1a2570bb/webhook-server/0.log" Nov 24 09:28:25 crc kubenswrapper[4691]: I1124 09:28:25.622139 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-b884p_e76d55fd-c894-4236-8921-8b60a88125f7/kube-rbac-proxy/0.log" Nov 24 09:28:26 crc kubenswrapper[4691]: I1124 09:28:26.370042 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-b884p_e76d55fd-c894-4236-8921-8b60a88125f7/speaker/0.log" Nov 24 09:28:26 crc kubenswrapper[4691]: I1124 09:28:26.636567 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/frr/0.log" Nov 24 09:28:37 crc kubenswrapper[4691]: I1124 09:28:37.745370 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/util/0.log" Nov 24 09:28:37 crc kubenswrapper[4691]: I1124 09:28:37.859246 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/pull/0.log" Nov 24 09:28:37 crc kubenswrapper[4691]: I1124 09:28:37.860685 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/util/0.log" Nov 24 09:28:37 crc kubenswrapper[4691]: I1124 09:28:37.933406 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/pull/0.log" Nov 24 09:28:38 crc kubenswrapper[4691]: I1124 09:28:38.108583 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/util/0.log" Nov 24 09:28:38 crc kubenswrapper[4691]: I1124 09:28:38.125490 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/pull/0.log" Nov 24 09:28:38 crc kubenswrapper[4691]: I1124 09:28:38.135625 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/extract/0.log" Nov 24 09:28:38 crc kubenswrapper[4691]: I1124 09:28:38.270957 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-utilities/0.log" Nov 24 09:28:38 crc kubenswrapper[4691]: I1124 09:28:38.446034 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-content/0.log" Nov 24 09:28:38 crc kubenswrapper[4691]: I1124 09:28:38.474107 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-utilities/0.log" Nov 24 09:28:38 crc kubenswrapper[4691]: I1124 09:28:38.484830 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-content/0.log" 
Nov 24 09:28:38 crc kubenswrapper[4691]: I1124 09:28:38.630983 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-utilities/0.log" Nov 24 09:28:38 crc kubenswrapper[4691]: I1124 09:28:38.689305 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-content/0.log" Nov 24 09:28:38 crc kubenswrapper[4691]: I1124 09:28:38.919592 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-utilities/0.log" Nov 24 09:28:39 crc kubenswrapper[4691]: I1124 09:28:39.076216 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-content/0.log" Nov 24 09:28:39 crc kubenswrapper[4691]: I1124 09:28:39.170596 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-utilities/0.log" Nov 24 09:28:39 crc kubenswrapper[4691]: I1124 09:28:39.195246 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-content/0.log" Nov 24 09:28:39 crc kubenswrapper[4691]: I1124 09:28:39.406543 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-content/0.log" Nov 24 09:28:39 crc kubenswrapper[4691]: I1124 09:28:39.428936 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/registry-server/0.log" Nov 24 09:28:39 crc kubenswrapper[4691]: I1124 09:28:39.502990 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-utilities/0.log" Nov 24 09:28:39 crc kubenswrapper[4691]: I1124 09:28:39.712338 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/util/0.log" Nov 24 09:28:39 crc kubenswrapper[4691]: I1124 09:28:39.914427 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/pull/0.log" Nov 24 09:28:39 crc kubenswrapper[4691]: I1124 09:28:39.960764 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/pull/0.log" Nov 24 09:28:39 crc kubenswrapper[4691]: I1124 09:28:39.994242 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/util/0.log" Nov 24 09:28:40 crc kubenswrapper[4691]: I1124 09:28:40.175940 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/pull/0.log" Nov 24 09:28:40 crc kubenswrapper[4691]: I1124 09:28:40.235862 4691 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/util/0.log" Nov 24 09:28:40 crc kubenswrapper[4691]: I1124 09:28:40.263558 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/registry-server/0.log" Nov 24 09:28:40 crc kubenswrapper[4691]: I1124 09:28:40.265177 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/extract/0.log" Nov 24 09:28:40 crc kubenswrapper[4691]: I1124 09:28:40.417372 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wgwxf_74a9daa2-7bfc-487c-9990-9848391da95d/marketplace-operator/0.log" Nov 24 09:28:40 crc kubenswrapper[4691]: I1124 09:28:40.485231 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-utilities/0.log" Nov 24 09:28:40 crc kubenswrapper[4691]: I1124 09:28:40.651082 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-content/0.log" Nov 24 09:28:40 crc kubenswrapper[4691]: I1124 09:28:40.689370 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-content/0.log" Nov 24 09:28:40 crc kubenswrapper[4691]: I1124 09:28:40.698930 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-utilities/0.log" Nov 24 09:28:40 crc kubenswrapper[4691]: I1124 09:28:40.866039 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-content/0.log" Nov 24 09:28:40 crc kubenswrapper[4691]: I1124 09:28:40.886735 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-utilities/0.log" Nov 24 09:28:41 crc kubenswrapper[4691]: I1124 09:28:41.087698 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-utilities/0.log" Nov 24 09:28:41 crc kubenswrapper[4691]: I1124 09:28:41.109530 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/registry-server/0.log" Nov 24 09:28:41 crc kubenswrapper[4691]: I1124 09:28:41.254104 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-content/0.log" Nov 24 09:28:41 crc kubenswrapper[4691]: I1124 09:28:41.260195 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-utilities/0.log" Nov 24 09:28:41 crc kubenswrapper[4691]: I1124 09:28:41.304231 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-content/0.log" Nov 24 09:28:41 crc kubenswrapper[4691]: I1124 09:28:41.459927 4691 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-utilities/0.log" Nov 24 09:28:41 crc kubenswrapper[4691]: I1124 09:28:41.502373 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-content/0.log" Nov 24 09:28:41 crc kubenswrapper[4691]: I1124 09:28:41.638640 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/registry-server/0.log" Nov 24 09:28:51 crc kubenswrapper[4691]: I1124 09:28:51.089930 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:28:51 crc kubenswrapper[4691]: I1124 09:28:51.090668 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:29:21 crc kubenswrapper[4691]: I1124 09:29:21.089647 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:29:21 crc kubenswrapper[4691]: I1124 09:29:21.090311 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:29:21 crc kubenswrapper[4691]: I1124 09:29:21.090361 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 09:29:21 crc kubenswrapper[4691]: I1124 09:29:21.091143 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"66a56f3fd5650c8a5dc80218e98268dc9ad3afeff230b247f513bb7c5ff0fe3d"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 09:29:21 crc kubenswrapper[4691]: I1124 09:29:21.091200 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://66a56f3fd5650c8a5dc80218e98268dc9ad3afeff230b247f513bb7c5ff0fe3d" gracePeriod=600 Nov 24 09:29:21 crc kubenswrapper[4691]: I1124 09:29:21.334808 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="66a56f3fd5650c8a5dc80218e98268dc9ad3afeff230b247f513bb7c5ff0fe3d" exitCode=0 Nov 24 09:29:21 crc kubenswrapper[4691]: I1124 09:29:21.335293 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"66a56f3fd5650c8a5dc80218e98268dc9ad3afeff230b247f513bb7c5ff0fe3d"} Nov 24 09:29:21 crc kubenswrapper[4691]: I1124 09:29:21.335812 4691 scope.go:117] "RemoveContainer" containerID="f0361322acc6980ed49e64daf4258e8bc09b1cf31948b359c5ff23352e9d3b26" Nov 24 09:29:22 crc kubenswrapper[4691]: I1124 09:29:22.347821 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c"} Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.159634 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c"] Nov 24 09:30:00 crc kubenswrapper[4691]: E1124 09:30:00.160792 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef8fbcc2-4c7c-4eea-b083-ff4d27212904" containerName="container-00" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.160811 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef8fbcc2-4c7c-4eea-b083-ff4d27212904" containerName="container-00" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.161059 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef8fbcc2-4c7c-4eea-b083-ff4d27212904" containerName="container-00" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.161939 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.164475 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.165611 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.175942 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c"] Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.236104 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzv9g\" (UniqueName: \"kubernetes.io/projected/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-kube-api-access-lzv9g\") pod \"collect-profiles-29399610-kck2c\" (UID: \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.236179 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-secret-volume\") pod \"collect-profiles-29399610-kck2c\" (UID: \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.236371 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-config-volume\") pod \"collect-profiles-29399610-kck2c\" (UID: 
\"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.337869 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-config-volume\") pod \"collect-profiles-29399610-kck2c\" (UID: \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.337942 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzv9g\" (UniqueName: \"kubernetes.io/projected/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-kube-api-access-lzv9g\") pod \"collect-profiles-29399610-kck2c\" (UID: \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.337967 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-secret-volume\") pod \"collect-profiles-29399610-kck2c\" (UID: \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.339522 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-config-volume\") pod \"collect-profiles-29399610-kck2c\" (UID: \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.348220 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-secret-volume\") pod \"collect-profiles-29399610-kck2c\" (UID: \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.356072 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzv9g\" (UniqueName: \"kubernetes.io/projected/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-kube-api-access-lzv9g\") pod \"collect-profiles-29399610-kck2c\" (UID: \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.487823 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:00 crc kubenswrapper[4691]: I1124 09:30:00.971774 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c"] Nov 24 09:30:01 crc kubenswrapper[4691]: I1124 09:30:01.724774 4691 generic.go:334] "Generic (PLEG): container finished" podID="97c7860a-fe4f-4f1d-b25b-93c9c96ba31e" containerID="cd29219dfcbe24db20f6bd7fe652927c0f9588344326b099c965de55858652fb" exitCode=0 Nov 24 09:30:01 crc kubenswrapper[4691]: I1124 09:30:01.724835 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" event={"ID":"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e","Type":"ContainerDied","Data":"cd29219dfcbe24db20f6bd7fe652927c0f9588344326b099c965de55858652fb"} Nov 24 09:30:01 crc kubenswrapper[4691]: I1124 09:30:01.724896 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" event={"ID":"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e","Type":"ContainerStarted","Data":"44939c0e60a87d523ac9972c3f27a0763c9548f1b68fec20c0028a8c564681c1"} Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.097420 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.195048 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-config-volume\") pod \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\" (UID: \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.196466 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-secret-volume\") pod \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\" (UID: \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.196485 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-config-volume" (OuterVolumeSpecName: "config-volume") pod "97c7860a-fe4f-4f1d-b25b-93c9c96ba31e" (UID: "97c7860a-fe4f-4f1d-b25b-93c9c96ba31e"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.196549 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzv9g\" (UniqueName: \"kubernetes.io/projected/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-kube-api-access-lzv9g\") pod \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\" (UID: \"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e\") " Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.197220 4691 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.203619 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-kube-api-access-lzv9g" (OuterVolumeSpecName: "kube-api-access-lzv9g") pod "97c7860a-fe4f-4f1d-b25b-93c9c96ba31e" (UID: "97c7860a-fe4f-4f1d-b25b-93c9c96ba31e"). InnerVolumeSpecName "kube-api-access-lzv9g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.205721 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "97c7860a-fe4f-4f1d-b25b-93c9c96ba31e" (UID: "97c7860a-fe4f-4f1d-b25b-93c9c96ba31e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.299843 4691 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.299921 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzv9g\" (UniqueName: \"kubernetes.io/projected/97c7860a-fe4f-4f1d-b25b-93c9c96ba31e-kube-api-access-lzv9g\") on node \"crc\" DevicePath \"\"" Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.743260 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" event={"ID":"97c7860a-fe4f-4f1d-b25b-93c9c96ba31e","Type":"ContainerDied","Data":"44939c0e60a87d523ac9972c3f27a0763c9548f1b68fec20c0028a8c564681c1"} Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.743575 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44939c0e60a87d523ac9972c3f27a0763c9548f1b68fec20c0028a8c564681c1" Nov 24 09:30:03 crc kubenswrapper[4691]: I1124 09:30:03.743341 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29399610-kck2c" Nov 24 09:30:04 crc kubenswrapper[4691]: I1124 09:30:04.185569 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"] Nov 24 09:30:04 crc kubenswrapper[4691]: I1124 09:30:04.197509 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29399565-77j4v"] Nov 24 09:30:04 crc kubenswrapper[4691]: I1124 09:30:04.786045 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="181c94c0-135e-4fdf-ab39-fd0326a29b74" path="/var/lib/kubelet/pods/181c94c0-135e-4fdf-ab39-fd0326a29b74/volumes" Nov 24 09:30:25 crc kubenswrapper[4691]: I1124 09:30:25.423729 4691 scope.go:117] "RemoveContainer" containerID="b737ee701dbd48044a7f68c307e1ec912aab41467076cbbd5c627b2ab059476b" Nov 24 09:30:38 crc kubenswrapper[4691]: I1124 09:30:38.110724 4691 generic.go:334] "Generic (PLEG): container finished" podID="295248d6-7aed-4269-ad13-efd75ab5499b" containerID="c84d4b4f7efe496e576f82873bd7dbf5a08563001c970cd00b5ae8de1df0992a" exitCode=0 Nov 24 09:30:38 crc kubenswrapper[4691]: I1124 09:30:38.110793 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qczfr/must-gather-fx258" event={"ID":"295248d6-7aed-4269-ad13-efd75ab5499b","Type":"ContainerDied","Data":"c84d4b4f7efe496e576f82873bd7dbf5a08563001c970cd00b5ae8de1df0992a"} Nov 24 09:30:38 crc kubenswrapper[4691]: I1124 09:30:38.112315 4691 scope.go:117] "RemoveContainer" containerID="c84d4b4f7efe496e576f82873bd7dbf5a08563001c970cd00b5ae8de1df0992a" Nov 24 09:30:38 crc kubenswrapper[4691]: I1124 09:30:38.535307 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qczfr_must-gather-fx258_295248d6-7aed-4269-ad13-efd75ab5499b/gather/0.log" Nov 24 09:30:46 crc kubenswrapper[4691]: I1124 09:30:46.746532 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qczfr/must-gather-fx258"] Nov 24 09:30:46 crc kubenswrapper[4691]: I1124 09:30:46.747330 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-qczfr/must-gather-fx258" podUID="295248d6-7aed-4269-ad13-efd75ab5499b" containerName="copy" containerID="cri-o://7d60996796e71bd913a9f7b2cc1ed14f7d4ee31c36eb57e7ff983e784fa6051d" gracePeriod=2 Nov 24 09:30:46 crc kubenswrapper[4691]: I1124 09:30:46.754779 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qczfr/must-gather-fx258"] Nov 24 09:30:47 crc kubenswrapper[4691]: I1124 09:30:47.203213 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qczfr_must-gather-fx258_295248d6-7aed-4269-ad13-efd75ab5499b/copy/0.log" Nov 24 09:30:47 crc kubenswrapper[4691]: I1124 09:30:47.203946 4691 generic.go:334] "Generic (PLEG): container finished" podID="295248d6-7aed-4269-ad13-efd75ab5499b" containerID="7d60996796e71bd913a9f7b2cc1ed14f7d4ee31c36eb57e7ff983e784fa6051d" exitCode=143 Nov 24 09:30:47 crc kubenswrapper[4691]: I1124 09:30:47.431900 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qczfr_must-gather-fx258_295248d6-7aed-4269-ad13-efd75ab5499b/copy/0.log" Nov 24 09:30:47 crc kubenswrapper[4691]: I1124 09:30:47.432493 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qczfr/must-gather-fx258" Nov 24 09:30:47 crc kubenswrapper[4691]: I1124 09:30:47.546947 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/295248d6-7aed-4269-ad13-efd75ab5499b-must-gather-output\") pod \"295248d6-7aed-4269-ad13-efd75ab5499b\" (UID: \"295248d6-7aed-4269-ad13-efd75ab5499b\") " Nov 24 09:30:47 crc kubenswrapper[4691]: I1124 09:30:47.547470 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6rcj\" (UniqueName: \"kubernetes.io/projected/295248d6-7aed-4269-ad13-efd75ab5499b-kube-api-access-w6rcj\") pod \"295248d6-7aed-4269-ad13-efd75ab5499b\" (UID: \"295248d6-7aed-4269-ad13-efd75ab5499b\") " Nov 24 09:30:47 crc kubenswrapper[4691]: I1124 09:30:47.555102 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/295248d6-7aed-4269-ad13-efd75ab5499b-kube-api-access-w6rcj" (OuterVolumeSpecName: "kube-api-access-w6rcj") pod "295248d6-7aed-4269-ad13-efd75ab5499b" (UID: "295248d6-7aed-4269-ad13-efd75ab5499b"). InnerVolumeSpecName "kube-api-access-w6rcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:30:47 crc kubenswrapper[4691]: I1124 09:30:47.649977 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6rcj\" (UniqueName: \"kubernetes.io/projected/295248d6-7aed-4269-ad13-efd75ab5499b-kube-api-access-w6rcj\") on node \"crc\" DevicePath \"\"" Nov 24 09:30:47 crc kubenswrapper[4691]: I1124 09:30:47.706577 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/295248d6-7aed-4269-ad13-efd75ab5499b-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "295248d6-7aed-4269-ad13-efd75ab5499b" (UID: "295248d6-7aed-4269-ad13-efd75ab5499b"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:30:47 crc kubenswrapper[4691]: I1124 09:30:47.752307 4691 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/295248d6-7aed-4269-ad13-efd75ab5499b-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 24 09:30:48 crc kubenswrapper[4691]: I1124 09:30:48.220194 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qczfr_must-gather-fx258_295248d6-7aed-4269-ad13-efd75ab5499b/copy/0.log" Nov 24 09:30:48 crc kubenswrapper[4691]: I1124 09:30:48.220653 4691 scope.go:117] "RemoveContainer" containerID="7d60996796e71bd913a9f7b2cc1ed14f7d4ee31c36eb57e7ff983e784fa6051d" Nov 24 09:30:48 crc kubenswrapper[4691]: I1124 09:30:48.220859 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qczfr/must-gather-fx258" Nov 24 09:30:48 crc kubenswrapper[4691]: I1124 09:30:48.260933 4691 scope.go:117] "RemoveContainer" containerID="c84d4b4f7efe496e576f82873bd7dbf5a08563001c970cd00b5ae8de1df0992a" Nov 24 09:30:48 crc kubenswrapper[4691]: I1124 09:30:48.775299 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="295248d6-7aed-4269-ad13-efd75ab5499b" path="/var/lib/kubelet/pods/295248d6-7aed-4269-ad13-efd75ab5499b/volumes" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.587036 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-djtvj"] Nov 24 09:31:08 crc kubenswrapper[4691]: E1124 09:31:08.588104 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="295248d6-7aed-4269-ad13-efd75ab5499b" containerName="copy" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.588124 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="295248d6-7aed-4269-ad13-efd75ab5499b" containerName="copy" Nov 24 09:31:08 crc kubenswrapper[4691]: E1124 09:31:08.588162 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="295248d6-7aed-4269-ad13-efd75ab5499b" containerName="gather" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.588169 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="295248d6-7aed-4269-ad13-efd75ab5499b" containerName="gather" Nov 24 09:31:08 crc kubenswrapper[4691]: E1124 09:31:08.588197 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97c7860a-fe4f-4f1d-b25b-93c9c96ba31e" containerName="collect-profiles" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.588205 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="97c7860a-fe4f-4f1d-b25b-93c9c96ba31e" containerName="collect-profiles" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.588492 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="295248d6-7aed-4269-ad13-efd75ab5499b" containerName="gather" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.588510 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="295248d6-7aed-4269-ad13-efd75ab5499b" containerName="copy" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.588522 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="97c7860a-fe4f-4f1d-b25b-93c9c96ba31e" containerName="collect-profiles" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.590204 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.598743 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-djtvj"] Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.613366 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-catalog-content\") pod \"redhat-marketplace-djtvj\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.613650 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85fxr\" (UniqueName: \"kubernetes.io/projected/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-kube-api-access-85fxr\") pod \"redhat-marketplace-djtvj\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.613740 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-utilities\") pod \"redhat-marketplace-djtvj\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.715499 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85fxr\" (UniqueName: \"kubernetes.io/projected/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-kube-api-access-85fxr\") pod \"redhat-marketplace-djtvj\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.715564 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-utilities\") pod \"redhat-marketplace-djtvj\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.715735 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-catalog-content\") pod \"redhat-marketplace-djtvj\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.716177 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-utilities\") pod \"redhat-marketplace-djtvj\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.716284 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-catalog-content\") pod \"redhat-marketplace-djtvj\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.744322 4691 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-85fxr\" (UniqueName: \"kubernetes.io/projected/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-kube-api-access-85fxr\") pod \"redhat-marketplace-djtvj\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:08 crc kubenswrapper[4691]: I1124 09:31:08.919172 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:09 crc kubenswrapper[4691]: I1124 09:31:09.458889 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-djtvj"] Nov 24 09:31:10 crc kubenswrapper[4691]: I1124 09:31:10.418949 4691 generic.go:334] "Generic (PLEG): container finished" podID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" containerID="589d3b35f687ac4134297c68150a04024e79fa6f223740055603bf618b574d21" exitCode=0 Nov 24 09:31:10 crc kubenswrapper[4691]: I1124 09:31:10.419187 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djtvj" event={"ID":"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef","Type":"ContainerDied","Data":"589d3b35f687ac4134297c68150a04024e79fa6f223740055603bf618b574d21"} Nov 24 09:31:10 crc kubenswrapper[4691]: I1124 09:31:10.419350 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djtvj" event={"ID":"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef","Type":"ContainerStarted","Data":"5dc38c73e642d1ce245182c04d6604904a51a33e73175650d8c903ca9a5d954d"} Nov 24 09:31:10 crc kubenswrapper[4691]: I1124 09:31:10.423407 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 09:31:11 crc kubenswrapper[4691]: I1124 09:31:11.433582 4691 generic.go:334] "Generic (PLEG): container finished" podID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" containerID="9237bfca3582919d47befacc61d8145bd8c40e14c04ad7b780a84917b7ffc6dc" exitCode=0 Nov 24 09:31:11 crc kubenswrapper[4691]: I1124 09:31:11.433646 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djtvj" event={"ID":"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef","Type":"ContainerDied","Data":"9237bfca3582919d47befacc61d8145bd8c40e14c04ad7b780a84917b7ffc6dc"} Nov 24 09:31:12 crc kubenswrapper[4691]: I1124 09:31:12.444091 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djtvj" event={"ID":"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef","Type":"ContainerStarted","Data":"1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a"} Nov 24 09:31:12 crc kubenswrapper[4691]: I1124 09:31:12.476383 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-djtvj" podStartSLOduration=2.8469676120000003 podStartE2EDuration="4.476362621s" podCreationTimestamp="2025-11-24 09:31:08 +0000 UTC" firstStartedPulling="2025-11-24 09:31:10.422989745 +0000 UTC m=+5632.421939014" lastFinishedPulling="2025-11-24 09:31:12.052384774 +0000 UTC m=+5634.051334023" observedRunningTime="2025-11-24 09:31:12.468135026 +0000 UTC m=+5634.467084285" watchObservedRunningTime="2025-11-24 09:31:12.476362621 +0000 UTC m=+5634.475311880" Nov 24 09:31:18 crc kubenswrapper[4691]: I1124 09:31:18.919498 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:18 crc kubenswrapper[4691]: I1124 09:31:18.919974 4691 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:18 crc kubenswrapper[4691]: I1124 09:31:18.962303 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:19 crc kubenswrapper[4691]: I1124 09:31:19.583558 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:19 crc kubenswrapper[4691]: I1124 09:31:19.644160 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-djtvj"] Nov 24 09:31:21 crc kubenswrapper[4691]: I1124 09:31:21.089014 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:31:21 crc kubenswrapper[4691]: I1124 09:31:21.089552 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:31:21 crc kubenswrapper[4691]: I1124 09:31:21.541372 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-djtvj" podUID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" containerName="registry-server" containerID="cri-o://1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a" gracePeriod=2 Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.003730 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.099245 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-utilities\") pod \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.099387 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85fxr\" (UniqueName: \"kubernetes.io/projected/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-kube-api-access-85fxr\") pod \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.099500 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-catalog-content\") pod \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\" (UID: \"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef\") " Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.100786 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-utilities" (OuterVolumeSpecName: "utilities") pod "b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" (UID: "b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.119950 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-kube-api-access-85fxr" (OuterVolumeSpecName: "kube-api-access-85fxr") pod "b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" (UID: "b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef"). InnerVolumeSpecName "kube-api-access-85fxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.120502 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" (UID: "b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.202252 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.202288 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85fxr\" (UniqueName: \"kubernetes.io/projected/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-kube-api-access-85fxr\") on node \"crc\" DevicePath \"\"" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.202301 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.555267 4691 generic.go:334] "Generic (PLEG): container finished" podID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" containerID="1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a" exitCode=0 Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.555327 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djtvj" event={"ID":"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef","Type":"ContainerDied","Data":"1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a"} Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.555361 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-djtvj" event={"ID":"b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef","Type":"ContainerDied","Data":"5dc38c73e642d1ce245182c04d6604904a51a33e73175650d8c903ca9a5d954d"} Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.555383 4691 scope.go:117] "RemoveContainer" containerID="1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.555379 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-djtvj" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.590399 4691 scope.go:117] "RemoveContainer" containerID="9237bfca3582919d47befacc61d8145bd8c40e14c04ad7b780a84917b7ffc6dc" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.616589 4691 scope.go:117] "RemoveContainer" containerID="589d3b35f687ac4134297c68150a04024e79fa6f223740055603bf618b574d21" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.618854 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-djtvj"] Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.631561 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-djtvj"] Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.665841 4691 scope.go:117] "RemoveContainer" containerID="1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a" Nov 24 09:31:22 crc kubenswrapper[4691]: E1124 09:31:22.666390 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a\": container with ID starting with 1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a not found: ID does not exist" containerID="1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.666481 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a"} err="failed to get container status \"1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a\": rpc error: code = NotFound desc = could not find container \"1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a\": container with ID starting with 1b4520ede90ee60667a46226187ab6582240ef20fb4a174f55e183f34626e81a not found: ID does not exist" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.666517 4691 scope.go:117] "RemoveContainer" containerID="9237bfca3582919d47befacc61d8145bd8c40e14c04ad7b780a84917b7ffc6dc" Nov 24 09:31:22 crc kubenswrapper[4691]: E1124 09:31:22.666867 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9237bfca3582919d47befacc61d8145bd8c40e14c04ad7b780a84917b7ffc6dc\": container with ID starting with 9237bfca3582919d47befacc61d8145bd8c40e14c04ad7b780a84917b7ffc6dc not found: ID does not exist" containerID="9237bfca3582919d47befacc61d8145bd8c40e14c04ad7b780a84917b7ffc6dc" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.666982 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9237bfca3582919d47befacc61d8145bd8c40e14c04ad7b780a84917b7ffc6dc"} err="failed to get container status \"9237bfca3582919d47befacc61d8145bd8c40e14c04ad7b780a84917b7ffc6dc\": rpc error: code = NotFound desc = could not find container \"9237bfca3582919d47befacc61d8145bd8c40e14c04ad7b780a84917b7ffc6dc\": container with ID starting with 9237bfca3582919d47befacc61d8145bd8c40e14c04ad7b780a84917b7ffc6dc not found: ID does not exist" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.667085 4691 scope.go:117] "RemoveContainer" containerID="589d3b35f687ac4134297c68150a04024e79fa6f223740055603bf618b574d21" Nov 24 09:31:22 crc kubenswrapper[4691]: E1124 09:31:22.667795 4691 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"589d3b35f687ac4134297c68150a04024e79fa6f223740055603bf618b574d21\": container with ID starting with 589d3b35f687ac4134297c68150a04024e79fa6f223740055603bf618b574d21 not found: ID does not exist" containerID="589d3b35f687ac4134297c68150a04024e79fa6f223740055603bf618b574d21" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.667973 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"589d3b35f687ac4134297c68150a04024e79fa6f223740055603bf618b574d21"} err="failed to get container status \"589d3b35f687ac4134297c68150a04024e79fa6f223740055603bf618b574d21\": rpc error: code = NotFound desc = could not find container \"589d3b35f687ac4134297c68150a04024e79fa6f223740055603bf618b574d21\": container with ID starting with 589d3b35f687ac4134297c68150a04024e79fa6f223740055603bf618b574d21 not found: ID does not exist" Nov 24 09:31:22 crc kubenswrapper[4691]: I1124 09:31:22.775410 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" path="/var/lib/kubelet/pods/b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef/volumes" Nov 24 09:31:51 crc kubenswrapper[4691]: I1124 09:31:51.089549 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:31:51 crc kubenswrapper[4691]: I1124 09:31:51.090155 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:32:21 crc kubenswrapper[4691]: I1124 09:32:21.089882 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:32:21 crc kubenswrapper[4691]: I1124 09:32:21.090819 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:32:21 crc kubenswrapper[4691]: I1124 09:32:21.090902 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 09:32:21 crc kubenswrapper[4691]: I1124 09:32:21.092235 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 09:32:21 crc kubenswrapper[4691]: I1124 09:32:21.092380 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" 
podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" gracePeriod=600 Nov 24 09:32:21 crc kubenswrapper[4691]: E1124 09:32:21.218833 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:32:22 crc kubenswrapper[4691]: I1124 09:32:22.118490 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" exitCode=0 Nov 24 09:32:22 crc kubenswrapper[4691]: I1124 09:32:22.118590 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c"} Nov 24 09:32:22 crc kubenswrapper[4691]: I1124 09:32:22.119011 4691 scope.go:117] "RemoveContainer" containerID="66a56f3fd5650c8a5dc80218e98268dc9ad3afeff230b247f513bb7c5ff0fe3d" Nov 24 09:32:22 crc kubenswrapper[4691]: I1124 09:32:22.119752 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:32:22 crc kubenswrapper[4691]: E1124 09:32:22.120117 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:32:25 crc kubenswrapper[4691]: I1124 09:32:25.536173 4691 scope.go:117] "RemoveContainer" containerID="ea006c30efd3fd32e3f0daa6be8084538a4399a5de2b8c4cd11a10912fc8632b" Nov 24 09:32:35 crc kubenswrapper[4691]: I1124 09:32:35.760411 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:32:35 crc kubenswrapper[4691]: E1124 09:32:35.761106 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:32:49 crc kubenswrapper[4691]: I1124 09:32:49.760977 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:32:49 crc kubenswrapper[4691]: E1124 09:32:49.763040 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:33:00 crc kubenswrapper[4691]: I1124 09:33:00.763729 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:33:00 crc kubenswrapper[4691]: E1124 09:33:00.765096 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:33:14 crc kubenswrapper[4691]: I1124 09:33:14.761199 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:33:14 crc kubenswrapper[4691]: E1124 09:33:14.762104 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:33:25 crc kubenswrapper[4691]: I1124 09:33:25.604692 4691 scope.go:117] "RemoveContainer" containerID="bf939a6d911744cd8336bb8f8580c290a95ba17ff23bb9397640b7ea9af88811" Nov 24 09:33:28 crc kubenswrapper[4691]: I1124 09:33:28.772479 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:33:28 crc kubenswrapper[4691]: E1124 09:33:28.773555 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.024954 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-t72qw/must-gather-k4bnj"] Nov 24 09:33:32 crc kubenswrapper[4691]: E1124 09:33:32.025841 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" containerName="extract-utilities" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.025854 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" containerName="extract-utilities" Nov 24 09:33:32 crc kubenswrapper[4691]: E1124 09:33:32.025882 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" containerName="registry-server" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.025888 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" containerName="registry-server" Nov 24 09:33:32 crc kubenswrapper[4691]: E1124 09:33:32.025917 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" containerName="extract-content" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.025923 4691 
state_mem.go:107] "Deleted CPUSet assignment" podUID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" containerName="extract-content" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.026095 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="b67efbc9-b3a7-4f3c-86c7-e01105bfd0ef" containerName="registry-server" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.027014 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/must-gather-k4bnj" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.031149 4691 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-t72qw"/"default-dockercfg-9hlv9" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.032572 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-t72qw"/"kube-root-ca.crt" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.032575 4691 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-t72qw"/"openshift-service-ca.crt" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.047186 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-t72qw/must-gather-k4bnj"] Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.192671 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0f1728cc-efe7-4b77-9fa5-604056334a1f-must-gather-output\") pod \"must-gather-k4bnj\" (UID: \"0f1728cc-efe7-4b77-9fa5-604056334a1f\") " pod="openshift-must-gather-t72qw/must-gather-k4bnj" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.193075 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbv29\" (UniqueName: \"kubernetes.io/projected/0f1728cc-efe7-4b77-9fa5-604056334a1f-kube-api-access-vbv29\") pod \"must-gather-k4bnj\" (UID: \"0f1728cc-efe7-4b77-9fa5-604056334a1f\") " pod="openshift-must-gather-t72qw/must-gather-k4bnj" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.295112 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbv29\" (UniqueName: \"kubernetes.io/projected/0f1728cc-efe7-4b77-9fa5-604056334a1f-kube-api-access-vbv29\") pod \"must-gather-k4bnj\" (UID: \"0f1728cc-efe7-4b77-9fa5-604056334a1f\") " pod="openshift-must-gather-t72qw/must-gather-k4bnj" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.295214 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0f1728cc-efe7-4b77-9fa5-604056334a1f-must-gather-output\") pod \"must-gather-k4bnj\" (UID: \"0f1728cc-efe7-4b77-9fa5-604056334a1f\") " pod="openshift-must-gather-t72qw/must-gather-k4bnj" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.295807 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0f1728cc-efe7-4b77-9fa5-604056334a1f-must-gather-output\") pod \"must-gather-k4bnj\" (UID: \"0f1728cc-efe7-4b77-9fa5-604056334a1f\") " pod="openshift-must-gather-t72qw/must-gather-k4bnj" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.313817 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbv29\" (UniqueName: \"kubernetes.io/projected/0f1728cc-efe7-4b77-9fa5-604056334a1f-kube-api-access-vbv29\") pod \"must-gather-k4bnj\" (UID: 
\"0f1728cc-efe7-4b77-9fa5-604056334a1f\") " pod="openshift-must-gather-t72qw/must-gather-k4bnj" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.349011 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/must-gather-k4bnj" Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.845306 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-t72qw/must-gather-k4bnj"] Nov 24 09:33:32 crc kubenswrapper[4691]: I1124 09:33:32.863100 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/must-gather-k4bnj" event={"ID":"0f1728cc-efe7-4b77-9fa5-604056334a1f","Type":"ContainerStarted","Data":"cb23de5c6f1f69d14ce2d58f4f93ce9cb5e8b51ad74083021f9c763534cb99b5"} Nov 24 09:33:33 crc kubenswrapper[4691]: I1124 09:33:33.874893 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/must-gather-k4bnj" event={"ID":"0f1728cc-efe7-4b77-9fa5-604056334a1f","Type":"ContainerStarted","Data":"b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d"} Nov 24 09:33:33 crc kubenswrapper[4691]: I1124 09:33:33.875521 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/must-gather-k4bnj" event={"ID":"0f1728cc-efe7-4b77-9fa5-604056334a1f","Type":"ContainerStarted","Data":"0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5"} Nov 24 09:33:33 crc kubenswrapper[4691]: I1124 09:33:33.908925 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-t72qw/must-gather-k4bnj" podStartSLOduration=1.908909062 podStartE2EDuration="1.908909062s" podCreationTimestamp="2025-11-24 09:33:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:33:33.905776312 +0000 UTC m=+5775.904725561" watchObservedRunningTime="2025-11-24 09:33:33.908909062 +0000 UTC m=+5775.907858311" Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.076602 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-t72qw/crc-debug-dkn27"] Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.078697 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-dkn27" Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.200842 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/61f293cd-0fc7-4424-b740-d7096c0bce85-host\") pod \"crc-debug-dkn27\" (UID: \"61f293cd-0fc7-4424-b740-d7096c0bce85\") " pod="openshift-must-gather-t72qw/crc-debug-dkn27" Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.201236 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4swt\" (UniqueName: \"kubernetes.io/projected/61f293cd-0fc7-4424-b740-d7096c0bce85-kube-api-access-x4swt\") pod \"crc-debug-dkn27\" (UID: \"61f293cd-0fc7-4424-b740-d7096c0bce85\") " pod="openshift-must-gather-t72qw/crc-debug-dkn27" Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.302941 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/61f293cd-0fc7-4424-b740-d7096c0bce85-host\") pod \"crc-debug-dkn27\" (UID: \"61f293cd-0fc7-4424-b740-d7096c0bce85\") " pod="openshift-must-gather-t72qw/crc-debug-dkn27" Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.303108 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/61f293cd-0fc7-4424-b740-d7096c0bce85-host\") pod \"crc-debug-dkn27\" (UID: \"61f293cd-0fc7-4424-b740-d7096c0bce85\") " pod="openshift-must-gather-t72qw/crc-debug-dkn27" Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.303131 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4swt\" (UniqueName: \"kubernetes.io/projected/61f293cd-0fc7-4424-b740-d7096c0bce85-kube-api-access-x4swt\") pod \"crc-debug-dkn27\" (UID: \"61f293cd-0fc7-4424-b740-d7096c0bce85\") " pod="openshift-must-gather-t72qw/crc-debug-dkn27" Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.329236 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4swt\" (UniqueName: \"kubernetes.io/projected/61f293cd-0fc7-4424-b740-d7096c0bce85-kube-api-access-x4swt\") pod \"crc-debug-dkn27\" (UID: \"61f293cd-0fc7-4424-b740-d7096c0bce85\") " pod="openshift-must-gather-t72qw/crc-debug-dkn27" Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.400022 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-dkn27"
Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.914830 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/crc-debug-dkn27" event={"ID":"61f293cd-0fc7-4424-b740-d7096c0bce85","Type":"ContainerStarted","Data":"dd8a1a9a463b680fc2fbf6dd88b9e192020d7aad75d5c6eaca0b44be55d58385"}
Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.915275 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/crc-debug-dkn27" event={"ID":"61f293cd-0fc7-4424-b740-d7096c0bce85","Type":"ContainerStarted","Data":"be8436d5ec46c79a3cc174ff7123e363175a029f5dcc3ef52c196fc3cc590ae3"}
Nov 24 09:33:37 crc kubenswrapper[4691]: I1124 09:33:37.948476 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-t72qw/crc-debug-dkn27" podStartSLOduration=0.948437968 podStartE2EDuration="948.437968ms" podCreationTimestamp="2025-11-24 09:33:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:33:37.934891353 +0000 UTC m=+5779.933840612" watchObservedRunningTime="2025-11-24 09:33:37.948437968 +0000 UTC m=+5779.947387217"
Nov 24 09:33:42 crc kubenswrapper[4691]: I1124 09:33:42.760677 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c"
Nov 24 09:33:42 crc kubenswrapper[4691]: E1124 09:33:42.761557 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 09:33:56 crc kubenswrapper[4691]: I1124 09:33:56.760623 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c"
Nov 24 09:33:56 crc kubenswrapper[4691]: E1124 09:33:56.761469 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 09:34:08 crc kubenswrapper[4691]: I1124 09:34:08.769375 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c"
Nov 24 09:34:08 crc kubenswrapper[4691]: E1124 09:34:08.770205 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 09:34:18 crc kubenswrapper[4691]: I1124 09:34:18.297623 4691 generic.go:334] "Generic (PLEG): container finished" podID="61f293cd-0fc7-4424-b740-d7096c0bce85" containerID="dd8a1a9a463b680fc2fbf6dd88b9e192020d7aad75d5c6eaca0b44be55d58385" exitCode=0
Nov 24 09:34:18 crc kubenswrapper[4691]: I1124 09:34:18.297730 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/crc-debug-dkn27" event={"ID":"61f293cd-0fc7-4424-b740-d7096c0bce85","Type":"ContainerDied","Data":"dd8a1a9a463b680fc2fbf6dd88b9e192020d7aad75d5c6eaca0b44be55d58385"}
Nov 24 09:34:19 crc kubenswrapper[4691]: I1124 09:34:19.419269 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-dkn27"
Nov 24 09:34:19 crc kubenswrapper[4691]: I1124 09:34:19.451985 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-t72qw/crc-debug-dkn27"]
Nov 24 09:34:19 crc kubenswrapper[4691]: I1124 09:34:19.462960 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-t72qw/crc-debug-dkn27"]
Nov 24 09:34:19 crc kubenswrapper[4691]: I1124 09:34:19.535385 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4swt\" (UniqueName: \"kubernetes.io/projected/61f293cd-0fc7-4424-b740-d7096c0bce85-kube-api-access-x4swt\") pod \"61f293cd-0fc7-4424-b740-d7096c0bce85\" (UID: \"61f293cd-0fc7-4424-b740-d7096c0bce85\") "
Nov 24 09:34:19 crc kubenswrapper[4691]: I1124 09:34:19.535520 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/61f293cd-0fc7-4424-b740-d7096c0bce85-host\") pod \"61f293cd-0fc7-4424-b740-d7096c0bce85\" (UID: \"61f293cd-0fc7-4424-b740-d7096c0bce85\") "
Nov 24 09:34:19 crc kubenswrapper[4691]: I1124 09:34:19.535632 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/61f293cd-0fc7-4424-b740-d7096c0bce85-host" (OuterVolumeSpecName: "host") pod "61f293cd-0fc7-4424-b740-d7096c0bce85" (UID: "61f293cd-0fc7-4424-b740-d7096c0bce85"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 09:34:19 crc kubenswrapper[4691]: I1124 09:34:19.536175 4691 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/61f293cd-0fc7-4424-b740-d7096c0bce85-host\") on node \"crc\" DevicePath \"\""
Nov 24 09:34:19 crc kubenswrapper[4691]: I1124 09:34:19.541685 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61f293cd-0fc7-4424-b740-d7096c0bce85-kube-api-access-x4swt" (OuterVolumeSpecName: "kube-api-access-x4swt") pod "61f293cd-0fc7-4424-b740-d7096c0bce85" (UID: "61f293cd-0fc7-4424-b740-d7096c0bce85"). InnerVolumeSpecName "kube-api-access-x4swt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 09:34:19 crc kubenswrapper[4691]: I1124 09:34:19.637956 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4swt\" (UniqueName: \"kubernetes.io/projected/61f293cd-0fc7-4424-b740-d7096c0bce85-kube-api-access-x4swt\") on node \"crc\" DevicePath \"\""
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.315026 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be8436d5ec46c79a3cc174ff7123e363175a029f5dcc3ef52c196fc3cc590ae3"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.315096 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-dkn27"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.642886 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-t72qw/crc-debug-xt67p"]
Nov 24 09:34:20 crc kubenswrapper[4691]: E1124 09:34:20.643377 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61f293cd-0fc7-4424-b740-d7096c0bce85" containerName="container-00"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.643414 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="61f293cd-0fc7-4424-b740-d7096c0bce85" containerName="container-00"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.643707 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="61f293cd-0fc7-4424-b740-d7096c0bce85" containerName="container-00"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.644485 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-xt67p"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.757970 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5d0b58ba-e3cc-47df-a998-33ed585f1d74-host\") pod \"crc-debug-xt67p\" (UID: \"5d0b58ba-e3cc-47df-a998-33ed585f1d74\") " pod="openshift-must-gather-t72qw/crc-debug-xt67p"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.758166 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4fnf\" (UniqueName: \"kubernetes.io/projected/5d0b58ba-e3cc-47df-a998-33ed585f1d74-kube-api-access-t4fnf\") pod \"crc-debug-xt67p\" (UID: \"5d0b58ba-e3cc-47df-a998-33ed585f1d74\") " pod="openshift-must-gather-t72qw/crc-debug-xt67p"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.775546 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61f293cd-0fc7-4424-b740-d7096c0bce85" path="/var/lib/kubelet/pods/61f293cd-0fc7-4424-b740-d7096c0bce85/volumes"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.860474 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5d0b58ba-e3cc-47df-a998-33ed585f1d74-host\") pod \"crc-debug-xt67p\" (UID: \"5d0b58ba-e3cc-47df-a998-33ed585f1d74\") " pod="openshift-must-gather-t72qw/crc-debug-xt67p"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.860678 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4fnf\" (UniqueName: \"kubernetes.io/projected/5d0b58ba-e3cc-47df-a998-33ed585f1d74-kube-api-access-t4fnf\") pod \"crc-debug-xt67p\" (UID: \"5d0b58ba-e3cc-47df-a998-33ed585f1d74\") " pod="openshift-must-gather-t72qw/crc-debug-xt67p"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.860920 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5d0b58ba-e3cc-47df-a998-33ed585f1d74-host\") pod \"crc-debug-xt67p\" (UID: \"5d0b58ba-e3cc-47df-a998-33ed585f1d74\") " pod="openshift-must-gather-t72qw/crc-debug-xt67p"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.880104 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4fnf\" (UniqueName: \"kubernetes.io/projected/5d0b58ba-e3cc-47df-a998-33ed585f1d74-kube-api-access-t4fnf\") pod \"crc-debug-xt67p\" (UID: \"5d0b58ba-e3cc-47df-a998-33ed585f1d74\") " pod="openshift-must-gather-t72qw/crc-debug-xt67p"
Nov 24 09:34:20 crc kubenswrapper[4691]: I1124 09:34:20.962137 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-xt67p"
Nov 24 09:34:21 crc kubenswrapper[4691]: I1124 09:34:21.326837 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/crc-debug-xt67p" event={"ID":"5d0b58ba-e3cc-47df-a998-33ed585f1d74","Type":"ContainerStarted","Data":"9f9a9e428205204b015bcb6a94a9035d1a488f894e7e0fda7c544ed2459ec4b7"}
Nov 24 09:34:21 crc kubenswrapper[4691]: I1124 09:34:21.327105 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/crc-debug-xt67p" event={"ID":"5d0b58ba-e3cc-47df-a998-33ed585f1d74","Type":"ContainerStarted","Data":"f4e2740412fbf70d3bf22383899de689ec66f566abe374de8292821ece8b9a40"}
Nov 24 09:34:21 crc kubenswrapper[4691]: I1124 09:34:21.345363 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-t72qw/crc-debug-xt67p" podStartSLOduration=1.345344869 podStartE2EDuration="1.345344869s" podCreationTimestamp="2025-11-24 09:34:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-24 09:34:21.343078065 +0000 UTC m=+5823.342027314" watchObservedRunningTime="2025-11-24 09:34:21.345344869 +0000 UTC m=+5823.344294108"
Nov 24 09:34:21 crc kubenswrapper[4691]: I1124 09:34:21.760348 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c"
Nov 24 09:34:21 crc kubenswrapper[4691]: E1124 09:34:21.761873 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 09:34:22 crc kubenswrapper[4691]: I1124 09:34:22.338632 4691 generic.go:334] "Generic (PLEG): container finished" podID="5d0b58ba-e3cc-47df-a998-33ed585f1d74" containerID="9f9a9e428205204b015bcb6a94a9035d1a488f894e7e0fda7c544ed2459ec4b7" exitCode=0
Nov 24 09:34:22 crc kubenswrapper[4691]: I1124 09:34:22.338696 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/crc-debug-xt67p" event={"ID":"5d0b58ba-e3cc-47df-a998-33ed585f1d74","Type":"ContainerDied","Data":"9f9a9e428205204b015bcb6a94a9035d1a488f894e7e0fda7c544ed2459ec4b7"}
Nov 24 09:34:23 crc kubenswrapper[4691]: I1124 09:34:23.450124 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-xt67p"
Nov 24 09:34:23 crc kubenswrapper[4691]: I1124 09:34:23.616828 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t4fnf\" (UniqueName: \"kubernetes.io/projected/5d0b58ba-e3cc-47df-a998-33ed585f1d74-kube-api-access-t4fnf\") pod \"5d0b58ba-e3cc-47df-a998-33ed585f1d74\" (UID: \"5d0b58ba-e3cc-47df-a998-33ed585f1d74\") "
Nov 24 09:34:23 crc kubenswrapper[4691]: I1124 09:34:23.617109 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5d0b58ba-e3cc-47df-a998-33ed585f1d74-host\") pod \"5d0b58ba-e3cc-47df-a998-33ed585f1d74\" (UID: \"5d0b58ba-e3cc-47df-a998-33ed585f1d74\") "
Nov 24 09:34:23 crc kubenswrapper[4691]: I1124 09:34:23.617186 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d0b58ba-e3cc-47df-a998-33ed585f1d74-host" (OuterVolumeSpecName: "host") pod "5d0b58ba-e3cc-47df-a998-33ed585f1d74" (UID: "5d0b58ba-e3cc-47df-a998-33ed585f1d74"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 09:34:23 crc kubenswrapper[4691]: I1124 09:34:23.617697 4691 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5d0b58ba-e3cc-47df-a998-33ed585f1d74-host\") on node \"crc\" DevicePath \"\""
Nov 24 09:34:23 crc kubenswrapper[4691]: I1124 09:34:23.623835 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d0b58ba-e3cc-47df-a998-33ed585f1d74-kube-api-access-t4fnf" (OuterVolumeSpecName: "kube-api-access-t4fnf") pod "5d0b58ba-e3cc-47df-a998-33ed585f1d74" (UID: "5d0b58ba-e3cc-47df-a998-33ed585f1d74"). InnerVolumeSpecName "kube-api-access-t4fnf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 09:34:23 crc kubenswrapper[4691]: I1124 09:34:23.719070 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t4fnf\" (UniqueName: \"kubernetes.io/projected/5d0b58ba-e3cc-47df-a998-33ed585f1d74-kube-api-access-t4fnf\") on node \"crc\" DevicePath \"\""
Nov 24 09:34:23 crc kubenswrapper[4691]: I1124 09:34:23.732463 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-t72qw/crc-debug-xt67p"]
Nov 24 09:34:23 crc kubenswrapper[4691]: I1124 09:34:23.741485 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-t72qw/crc-debug-xt67p"]
Nov 24 09:34:24 crc kubenswrapper[4691]: I1124 09:34:24.355695 4691 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4e2740412fbf70d3bf22383899de689ec66f566abe374de8292821ece8b9a40"
Nov 24 09:34:24 crc kubenswrapper[4691]: I1124 09:34:24.355756 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-xt67p"
Nov 24 09:34:24 crc kubenswrapper[4691]: I1124 09:34:24.784263 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d0b58ba-e3cc-47df-a998-33ed585f1d74" path="/var/lib/kubelet/pods/5d0b58ba-e3cc-47df-a998-33ed585f1d74/volumes"
Nov 24 09:34:24 crc kubenswrapper[4691]: I1124 09:34:24.938426 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-t72qw/crc-debug-2c9p8"]
Nov 24 09:34:24 crc kubenswrapper[4691]: E1124 09:34:24.938932 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d0b58ba-e3cc-47df-a998-33ed585f1d74" containerName="container-00"
Nov 24 09:34:24 crc kubenswrapper[4691]: I1124 09:34:24.938953 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d0b58ba-e3cc-47df-a998-33ed585f1d74" containerName="container-00"
Nov 24 09:34:24 crc kubenswrapper[4691]: I1124 09:34:24.939174 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d0b58ba-e3cc-47df-a998-33ed585f1d74" containerName="container-00"
Nov 24 09:34:24 crc kubenswrapper[4691]: I1124 09:34:24.939971 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-2c9p8"
Nov 24 09:34:25 crc kubenswrapper[4691]: I1124 09:34:25.050710 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz5mz\" (UniqueName: \"kubernetes.io/projected/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-kube-api-access-dz5mz\") pod \"crc-debug-2c9p8\" (UID: \"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95\") " pod="openshift-must-gather-t72qw/crc-debug-2c9p8"
Nov 24 09:34:25 crc kubenswrapper[4691]: I1124 09:34:25.051200 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-host\") pod \"crc-debug-2c9p8\" (UID: \"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95\") " pod="openshift-must-gather-t72qw/crc-debug-2c9p8"
Nov 24 09:34:25 crc kubenswrapper[4691]: I1124 09:34:25.152714 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-host\") pod \"crc-debug-2c9p8\" (UID: \"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95\") " pod="openshift-must-gather-t72qw/crc-debug-2c9p8"
Nov 24 09:34:25 crc kubenswrapper[4691]: I1124 09:34:25.152833 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dz5mz\" (UniqueName: \"kubernetes.io/projected/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-kube-api-access-dz5mz\") pod \"crc-debug-2c9p8\" (UID: \"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95\") " pod="openshift-must-gather-t72qw/crc-debug-2c9p8"
Nov 24 09:34:25 crc kubenswrapper[4691]: I1124 09:34:25.153207 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-host\") pod \"crc-debug-2c9p8\" (UID: \"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95\") " pod="openshift-must-gather-t72qw/crc-debug-2c9p8"
Nov 24 09:34:25 crc kubenswrapper[4691]: I1124 09:34:25.177667 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dz5mz\" (UniqueName: \"kubernetes.io/projected/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-kube-api-access-dz5mz\") pod \"crc-debug-2c9p8\" (UID: \"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95\") " pod="openshift-must-gather-t72qw/crc-debug-2c9p8"
Nov 24 09:34:25 crc kubenswrapper[4691]: I1124 09:34:25.259639 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-2c9p8"
Nov 24 09:34:25 crc kubenswrapper[4691]: I1124 09:34:25.365193 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/crc-debug-2c9p8" event={"ID":"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95","Type":"ContainerStarted","Data":"080747e944b07b1be348ed81b4fb568c310b4cf34603f692032cc6cd11788adb"}
Nov 24 09:34:26 crc kubenswrapper[4691]: I1124 09:34:26.376675 4691 generic.go:334] "Generic (PLEG): container finished" podID="3cdcc3b7-8915-419f-b7c5-6cc92fc02b95" containerID="c7b47555620e9432e2804981355d8d56314d4e4b3e1fc88c14fb3105d5c2d3c7" exitCode=0
Nov 24 09:34:26 crc kubenswrapper[4691]: I1124 09:34:26.376782 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/crc-debug-2c9p8" event={"ID":"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95","Type":"ContainerDied","Data":"c7b47555620e9432e2804981355d8d56314d4e4b3e1fc88c14fb3105d5c2d3c7"}
Nov 24 09:34:26 crc kubenswrapper[4691]: I1124 09:34:26.416260 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-t72qw/crc-debug-2c9p8"]
Nov 24 09:34:26 crc kubenswrapper[4691]: I1124 09:34:26.426858 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-t72qw/crc-debug-2c9p8"]
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.488500 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-2c9p8"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.597198 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-host\") pod \"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95\" (UID: \"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95\") "
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.597395 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dz5mz\" (UniqueName: \"kubernetes.io/projected/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-kube-api-access-dz5mz\") pod \"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95\" (UID: \"3cdcc3b7-8915-419f-b7c5-6cc92fc02b95\") "
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.597403 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-host" (OuterVolumeSpecName: "host") pod "3cdcc3b7-8915-419f-b7c5-6cc92fc02b95" (UID: "3cdcc3b7-8915-419f-b7c5-6cc92fc02b95"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.597852 4691 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-host\") on node \"crc\" DevicePath \"\""
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.603203 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-kube-api-access-dz5mz" (OuterVolumeSpecName: "kube-api-access-dz5mz") pod "3cdcc3b7-8915-419f-b7c5-6cc92fc02b95" (UID: "3cdcc3b7-8915-419f-b7c5-6cc92fc02b95"). InnerVolumeSpecName "kube-api-access-dz5mz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.656489 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6phw7"]
Nov 24 09:34:27 crc kubenswrapper[4691]: E1124 09:34:27.656908 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cdcc3b7-8915-419f-b7c5-6cc92fc02b95" containerName="container-00"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.656924 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cdcc3b7-8915-419f-b7c5-6cc92fc02b95" containerName="container-00"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.657132 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cdcc3b7-8915-419f-b7c5-6cc92fc02b95" containerName="container-00"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.658508 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.666181 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6phw7"]
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.699786 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dz5mz\" (UniqueName: \"kubernetes.io/projected/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95-kube-api-access-dz5mz\") on node \"crc\" DevicePath \"\""
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.801654 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-catalog-content\") pod \"community-operators-6phw7\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") " pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.801978 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tvlz\" (UniqueName: \"kubernetes.io/projected/950621d8-ef6c-46c3-a427-311404acecd6-kube-api-access-4tvlz\") pod \"community-operators-6phw7\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") " pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.802191 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-utilities\") pod \"community-operators-6phw7\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") " pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.904187 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-catalog-content\") pod \"community-operators-6phw7\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") " pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.904253 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tvlz\" (UniqueName: \"kubernetes.io/projected/950621d8-ef6c-46c3-a427-311404acecd6-kube-api-access-4tvlz\") pod \"community-operators-6phw7\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") " pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.904334 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-utilities\") pod \"community-operators-6phw7\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") " pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.904778 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-catalog-content\") pod \"community-operators-6phw7\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") " pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.904783 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-utilities\") pod \"community-operators-6phw7\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") " pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.923410 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tvlz\" (UniqueName: \"kubernetes.io/projected/950621d8-ef6c-46c3-a427-311404acecd6-kube-api-access-4tvlz\") pod \"community-operators-6phw7\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") " pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:27 crc kubenswrapper[4691]: I1124 09:34:27.987815 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:28 crc kubenswrapper[4691]: I1124 09:34:28.399804 4691 scope.go:117] "RemoveContainer" containerID="c7b47555620e9432e2804981355d8d56314d4e4b3e1fc88c14fb3105d5c2d3c7"
Nov 24 09:34:28 crc kubenswrapper[4691]: I1124 09:34:28.399832 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t72qw/crc-debug-2c9p8"
Nov 24 09:34:28 crc kubenswrapper[4691]: W1124 09:34:28.660946 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod950621d8_ef6c_46c3_a427_311404acecd6.slice/crio-46441f8ba3c3762d080ac64a5c090822feac2a77a950866b64a964b09080dafb WatchSource:0}: Error finding container 46441f8ba3c3762d080ac64a5c090822feac2a77a950866b64a964b09080dafb: Status 404 returned error can't find the container with id 46441f8ba3c3762d080ac64a5c090822feac2a77a950866b64a964b09080dafb
Nov 24 09:34:28 crc kubenswrapper[4691]: I1124 09:34:28.662969 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6phw7"]
Nov 24 09:34:28 crc kubenswrapper[4691]: I1124 09:34:28.774744 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cdcc3b7-8915-419f-b7c5-6cc92fc02b95" path="/var/lib/kubelet/pods/3cdcc3b7-8915-419f-b7c5-6cc92fc02b95/volumes"
Nov 24 09:34:29 crc kubenswrapper[4691]: I1124 09:34:29.416256 4691 generic.go:334] "Generic (PLEG): container finished" podID="950621d8-ef6c-46c3-a427-311404acecd6" containerID="f5b575d636839ac1f52f8a5d4f1fa62633e3c7f79e0d3570466f34e3397b2470" exitCode=0
Nov 24 09:34:29 crc kubenswrapper[4691]: I1124 09:34:29.416521 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6phw7" event={"ID":"950621d8-ef6c-46c3-a427-311404acecd6","Type":"ContainerDied","Data":"f5b575d636839ac1f52f8a5d4f1fa62633e3c7f79e0d3570466f34e3397b2470"}
Nov 24 09:34:29 crc kubenswrapper[4691]: I1124 09:34:29.416914 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6phw7" event={"ID":"950621d8-ef6c-46c3-a427-311404acecd6","Type":"ContainerStarted","Data":"46441f8ba3c3762d080ac64a5c090822feac2a77a950866b64a964b09080dafb"}
Nov 24 09:34:30 crc kubenswrapper[4691]: I1124 09:34:30.433736 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6phw7" event={"ID":"950621d8-ef6c-46c3-a427-311404acecd6","Type":"ContainerStarted","Data":"93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8"}
Nov 24 09:34:31 crc kubenswrapper[4691]: I1124 09:34:31.444069 4691 generic.go:334] "Generic (PLEG): container finished" podID="950621d8-ef6c-46c3-a427-311404acecd6" containerID="93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8" exitCode=0
Nov 24 09:34:31 crc kubenswrapper[4691]: I1124 09:34:31.444121 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6phw7" event={"ID":"950621d8-ef6c-46c3-a427-311404acecd6","Type":"ContainerDied","Data":"93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8"}
Nov 24 09:34:32 crc kubenswrapper[4691]: I1124 09:34:32.454019 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6phw7" event={"ID":"950621d8-ef6c-46c3-a427-311404acecd6","Type":"ContainerStarted","Data":"222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378"}
Nov 24 09:34:32 crc kubenswrapper[4691]: I1124 09:34:32.493944 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6phw7" podStartSLOduration=3.041510251 podStartE2EDuration="5.493921713s" podCreationTimestamp="2025-11-24 09:34:27 +0000 UTC" firstStartedPulling="2025-11-24 09:34:29.419150565 +0000 UTC m=+5831.418099834" lastFinishedPulling="2025-11-24 09:34:31.871562047 +0000 UTC m=+5833.870511296" observedRunningTime="2025-11-24 09:34:32.469868838 +0000 UTC m=+5834.468818087" watchObservedRunningTime="2025-11-24 09:34:32.493921713 +0000 UTC m=+5834.492870972"
Nov 24 09:34:36 crc kubenswrapper[4691]: I1124 09:34:36.760843 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c"
Nov 24 09:34:36 crc kubenswrapper[4691]: E1124 09:34:36.761341 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 09:34:37 crc kubenswrapper[4691]: I1124 09:34:37.988150 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:37 crc kubenswrapper[4691]: I1124 09:34:37.988577 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:38 crc kubenswrapper[4691]: I1124 09:34:38.041840 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:38 crc kubenswrapper[4691]: I1124 09:34:38.578720 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:38 crc kubenswrapper[4691]: I1124 09:34:38.624782 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6phw7"]
Nov 24 09:34:40 crc kubenswrapper[4691]: I1124 09:34:40.524555 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6phw7" podUID="950621d8-ef6c-46c3-a427-311404acecd6" containerName="registry-server" containerID="cri-o://222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378" gracePeriod=2
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.023282 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.177921 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-catalog-content\") pod \"950621d8-ef6c-46c3-a427-311404acecd6\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") "
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.178005 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-utilities\") pod \"950621d8-ef6c-46c3-a427-311404acecd6\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") "
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.178147 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tvlz\" (UniqueName: \"kubernetes.io/projected/950621d8-ef6c-46c3-a427-311404acecd6-kube-api-access-4tvlz\") pod \"950621d8-ef6c-46c3-a427-311404acecd6\" (UID: \"950621d8-ef6c-46c3-a427-311404acecd6\") "
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.179523 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-utilities" (OuterVolumeSpecName: "utilities") pod "950621d8-ef6c-46c3-a427-311404acecd6" (UID: "950621d8-ef6c-46c3-a427-311404acecd6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.196768 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/950621d8-ef6c-46c3-a427-311404acecd6-kube-api-access-4tvlz" (OuterVolumeSpecName: "kube-api-access-4tvlz") pod "950621d8-ef6c-46c3-a427-311404acecd6" (UID: "950621d8-ef6c-46c3-a427-311404acecd6"). InnerVolumeSpecName "kube-api-access-4tvlz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.235893 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "950621d8-ef6c-46c3-a427-311404acecd6" (UID: "950621d8-ef6c-46c3-a427-311404acecd6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.280165 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tvlz\" (UniqueName: \"kubernetes.io/projected/950621d8-ef6c-46c3-a427-311404acecd6-kube-api-access-4tvlz\") on node \"crc\" DevicePath \"\""
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.280193 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.280202 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/950621d8-ef6c-46c3-a427-311404acecd6-utilities\") on node \"crc\" DevicePath \"\""
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.535659 4691 generic.go:334] "Generic (PLEG): container finished" podID="950621d8-ef6c-46c3-a427-311404acecd6" containerID="222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378" exitCode=0
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.535850 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6phw7" event={"ID":"950621d8-ef6c-46c3-a427-311404acecd6","Type":"ContainerDied","Data":"222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378"}
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.536942 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6phw7" event={"ID":"950621d8-ef6c-46c3-a427-311404acecd6","Type":"ContainerDied","Data":"46441f8ba3c3762d080ac64a5c090822feac2a77a950866b64a964b09080dafb"}
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.537019 4691 scope.go:117] "RemoveContainer" containerID="222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378"
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.535921 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6phw7"
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.581432 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6phw7"]
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.583035 4691 scope.go:117] "RemoveContainer" containerID="93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8"
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.590407 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6phw7"]
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.611223 4691 scope.go:117] "RemoveContainer" containerID="f5b575d636839ac1f52f8a5d4f1fa62633e3c7f79e0d3570466f34e3397b2470"
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.673806 4691 scope.go:117] "RemoveContainer" containerID="222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378"
Nov 24 09:34:41 crc kubenswrapper[4691]: E1124 09:34:41.674235 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378\": container with ID starting with 222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378 not found: ID does not exist" containerID="222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378"
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.674273 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378"} err="failed to get container status \"222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378\": rpc error: code = NotFound desc = could not find container \"222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378\": container with ID starting with 222d371fe6f1a946a530478edaf978d39095796eecd3d44b6a02798534e35378 not found: ID does not exist"
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.674292 4691 scope.go:117] "RemoveContainer" containerID="93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8"
Nov 24 09:34:41 crc kubenswrapper[4691]: E1124 09:34:41.674773 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8\": container with ID starting with 93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8 not found: ID does not exist" containerID="93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8"
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.674870 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8"} err="failed to get container status \"93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8\": rpc error: code = NotFound desc = could not find container \"93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8\": container with ID starting with 93668353b11c9472ef420fa88f2a93abc8dc165b61e1453bfcf29b815e9661e8 not found: ID does not exist"
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.674932 4691 scope.go:117] "RemoveContainer" containerID="f5b575d636839ac1f52f8a5d4f1fa62633e3c7f79e0d3570466f34e3397b2470"
Nov 24 09:34:41 crc kubenswrapper[4691]: E1124 09:34:41.675181 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5b575d636839ac1f52f8a5d4f1fa62633e3c7f79e0d3570466f34e3397b2470\": container with ID starting with f5b575d636839ac1f52f8a5d4f1fa62633e3c7f79e0d3570466f34e3397b2470 not found: ID does not exist" containerID="f5b575d636839ac1f52f8a5d4f1fa62633e3c7f79e0d3570466f34e3397b2470"
Nov 24 09:34:41 crc kubenswrapper[4691]: I1124 09:34:41.675204 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5b575d636839ac1f52f8a5d4f1fa62633e3c7f79e0d3570466f34e3397b2470"} err="failed to get container status \"f5b575d636839ac1f52f8a5d4f1fa62633e3c7f79e0d3570466f34e3397b2470\": rpc error: code = NotFound desc = could not find container \"f5b575d636839ac1f52f8a5d4f1fa62633e3c7f79e0d3570466f34e3397b2470\": container with ID starting with f5b575d636839ac1f52f8a5d4f1fa62633e3c7f79e0d3570466f34e3397b2470 not found: ID does not exist"
Nov 24 09:34:42 crc kubenswrapper[4691]: I1124 09:34:42.771494 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="950621d8-ef6c-46c3-a427-311404acecd6" path="/var/lib/kubelet/pods/950621d8-ef6c-46c3-a427-311404acecd6/volumes"
Nov 24 09:34:45 crc kubenswrapper[4691]: I1124 09:34:45.285273 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7d7c46cd68-xl465_f904b6e6-711f-4edd-bdaf-1eeca5979318/barbican-api/0.log"
Nov 24 09:34:45 crc kubenswrapper[4691]: I1124 09:34:45.424585 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7d7c46cd68-xl465_f904b6e6-711f-4edd-bdaf-1eeca5979318/barbican-api-log/0.log"
Nov 24 09:34:45 crc kubenswrapper[4691]: I1124 09:34:45.519284 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6866b57cd6-xcpbl_750147cd-32ed-4f3d-83e5-96798011bf10/barbican-keystone-listener/0.log"
Nov 24 09:34:45 crc kubenswrapper[4691]: I1124 09:34:45.635025 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6866b57cd6-xcpbl_750147cd-32ed-4f3d-83e5-96798011bf10/barbican-keystone-listener-log/0.log"
Nov 24 09:34:45 crc kubenswrapper[4691]: I1124 09:34:45.707591 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bcd6fbf67-bnwn2_e39f7b55-5583-421f-a817-bae68533b497/barbican-worker/0.log"
Nov 24 09:34:45 crc kubenswrapper[4691]: I1124 09:34:45.821169 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bcd6fbf67-bnwn2_e39f7b55-5583-421f-a817-bae68533b497/barbican-worker-log/0.log"
Nov 24 09:34:45 crc kubenswrapper[4691]: I1124 09:34:45.923166 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-t9n92_6199a668-e1b5-473b-8ff0-2fdf26b69c79/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:46 crc kubenswrapper[4691]: I1124 09:34:46.082684 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0e783b59-54e1-401f-a281-b665848b7083/ceilometer-central-agent/0.log"
Nov 24 09:34:46 crc kubenswrapper[4691]: I1124 09:34:46.123725 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0e783b59-54e1-401f-a281-b665848b7083/proxy-httpd/0.log"
Nov 24 09:34:46 crc kubenswrapper[4691]: I1124 09:34:46.149614 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0e783b59-54e1-401f-a281-b665848b7083/sg-core/0.log"
Nov 24 09:34:46 crc kubenswrapper[4691]: I1124 09:34:46.183260 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_0e783b59-54e1-401f-a281-b665848b7083/ceilometer-notification-agent/0.log"
Nov 24 09:34:46 crc kubenswrapper[4691]: I1124 09:34:46.374999 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_e9b3d587-fa7c-4af9-8667-d4ea91483ad9/cinder-api-log/0.log"
Nov 24 09:34:46 crc kubenswrapper[4691]: I1124 09:34:46.422559 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_e9b3d587-fa7c-4af9-8667-d4ea91483ad9/cinder-api/0.log"
Nov 24 09:34:46 crc kubenswrapper[4691]: I1124 09:34:46.559174 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_68176dd8-7480-4c30-8788-dd915e1568d5/cinder-scheduler/0.log"
Nov 24 09:34:46 crc kubenswrapper[4691]: I1124 09:34:46.612021 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_68176dd8-7480-4c30-8788-dd915e1568d5/probe/0.log"
Nov 24 09:34:46 crc kubenswrapper[4691]: I1124 09:34:46.703974 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-764zv_7a0ce3be-4dc4-4451-979d-0f8a4372e061/configure-network-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:46 crc kubenswrapper[4691]: I1124 09:34:46.847825 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-sgd4k_1ac65fef-8c31-48ea-9715-9245e9dd717e/configure-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:46 crc kubenswrapper[4691]: I1124 09:34:46.939030 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-gx44c_2f860729-9ea4-4236-9465-68ac2164ac5c/init/0.log"
Nov 24 09:34:47 crc kubenswrapper[4691]: I1124 09:34:47.092687 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-gx44c_2f860729-9ea4-4236-9465-68ac2164ac5c/init/0.log"
Nov 24 09:34:47 crc kubenswrapper[4691]: I1124 09:34:47.224957 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-svnp5_460ba73d-0917-4b4c-8ca1-141a72e6b3e4/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:47 crc kubenswrapper[4691]: I1124 09:34:47.329826 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-gx44c_2f860729-9ea4-4236-9465-68ac2164ac5c/dnsmasq-dns/0.log"
Nov 24 09:34:47 crc kubenswrapper[4691]: I1124 09:34:47.403802 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3/glance-httpd/0.log"
Nov 24 09:34:47 crc kubenswrapper[4691]: I1124 09:34:47.602431 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_02c207e4-4d7c-4e35-8296-6dcfe5a7b0a3/glance-log/0.log"
Nov 24 09:34:47 crc kubenswrapper[4691]: I1124 09:34:47.740013 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_31f53279-5e1f-44f9-a1f5-338600bc0156/glance-httpd/0.log"
Nov 24 09:34:47 crc kubenswrapper[4691]: I1124 09:34:47.741728 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_31f53279-5e1f-44f9-a1f5-338600bc0156/glance-log/0.log"
Nov 24 09:34:47 crc kubenswrapper[4691]: I1124 09:34:47.969615 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5fb4677cdd-69rb6_5f7435d6-aa83-41a0-b392-b06d77f53aa2/horizon/0.log"
Nov 24 09:34:48 crc kubenswrapper[4691]: I1124 09:34:48.116242 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-kk7cb_81f9a1f9-0d85-4aff-a92f-93e8b36724ff/install-certs-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:48 crc kubenswrapper[4691]: I1124 09:34:48.284061 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-dphxx_5368c577-e1f7-45bf-9102-4e5422934e63/install-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:48 crc kubenswrapper[4691]: I1124 09:34:48.589385 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29399581-wrkt4_de9d27b1-63c1-4cc9-9bd6-9d015c3122cf/keystone-cron/0.log"
Nov 24 09:34:48 crc kubenswrapper[4691]: I1124 09:34:48.699356 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5fb4677cdd-69rb6_5f7435d6-aa83-41a0-b392-b06d77f53aa2/horizon-log/0.log"
Nov 24 09:34:48 crc kubenswrapper[4691]: I1124 09:34:48.851056 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_9d9b7a95-3c3a-4254-b63e-214d34969aab/kube-state-metrics/0.log"
Nov 24 09:34:49 crc kubenswrapper[4691]: I1124 09:34:49.053067 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-54fc9d9c65-98hdh_75e3a295-29f6-49d4-91d5-c6bf791eebdd/keystone-api/0.log"
Nov 24 09:34:49 crc kubenswrapper[4691]: I1124 09:34:49.096477 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-dgvjm_e9953558-8b56-432e-bde8-c07beaa047c0/libvirt-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:49 crc kubenswrapper[4691]: I1124 09:34:49.549645 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-tzzt2_42d1ff5d-430e-489b-9015-b8a7ad572893/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:49 crc kubenswrapper[4691]: I1124 09:34:49.741414 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-57b84ccfdc-qnsn7_8912bda5-405a-472b-a80f-2140a7bb0ded/neutron-httpd/0.log"
Nov 24 09:34:50 crc kubenswrapper[4691]: I1124 09:34:50.091469 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-57b84ccfdc-qnsn7_8912bda5-405a-472b-a80f-2140a7bb0ded/neutron-api/0.log"
Nov 24 09:34:50 crc kubenswrapper[4691]: I1124 09:34:50.726639 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_80ba081a-de68-4111-8dd6-ec207b574dee/nova-cell0-conductor-conductor/0.log"
Nov 24 09:34:50 crc kubenswrapper[4691]: I1124 09:34:50.777827 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_70312fff-c511-48b1-a398-331d593ca41f/nova-cell1-conductor-conductor/0.log"
Nov 24 09:34:51 crc kubenswrapper[4691]: I1124 09:34:51.378497 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_d624020f-236a-4048-acb6-a7db917757f6/nova-cell1-novncproxy-novncproxy/0.log"
Nov 24 09:34:51 crc kubenswrapper[4691]: I1124 09:34:51.621557 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-zsxsb_bf4bcfba-eec4-43be-b119-cf8f0bdd7182/nova-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:51 crc kubenswrapper[4691]: I1124 09:34:51.706680 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5f883df6-eeae-475d-80e8-ef121d343ae7/nova-api-log/0.log"
Nov 24 09:34:51 crc kubenswrapper[4691]: I1124 09:34:51.760388 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c"
Nov 24 09:34:51 crc kubenswrapper[4691]: E1124 09:34:51.760725 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 09:34:52 crc kubenswrapper[4691]: I1124 09:34:52.439910 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_816aeaf6-40c5-4859-b819-bcfb46750549/nova-metadata-log/0.log"
Nov 24 09:34:52 crc kubenswrapper[4691]: I1124 09:34:52.641035 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_94098659-df1b-4792-b466-9e7a95bf19e2/nova-scheduler-scheduler/0.log"
Nov 24 09:34:52 crc kubenswrapper[4691]: I1124 09:34:52.687338 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5f883df6-eeae-475d-80e8-ef121d343ae7/nova-api-api/0.log"
Nov 24 09:34:52 crc kubenswrapper[4691]: I1124 09:34:52.894812 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_5021ba85-77e5-4fc8-8816-5ad1587b82e5/mysql-bootstrap/0.log"
Nov 24 09:34:53 crc kubenswrapper[4691]: I1124 09:34:53.074562 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_5021ba85-77e5-4fc8-8816-5ad1587b82e5/galera/0.log"
Nov 24 09:34:53 crc kubenswrapper[4691]: I1124 09:34:53.104019 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_5021ba85-77e5-4fc8-8816-5ad1587b82e5/mysql-bootstrap/0.log"
Nov 24 09:34:53 crc kubenswrapper[4691]: I1124 09:34:53.273441 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_be26bfeb-e0f8-4c67-8938-55d8399b717c/mysql-bootstrap/0.log"
Nov 24 09:34:53 crc kubenswrapper[4691]: I1124 09:34:53.482539 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_be26bfeb-e0f8-4c67-8938-55d8399b717c/mysql-bootstrap/0.log"
Nov 24 09:34:53 crc kubenswrapper[4691]: I1124 09:34:53.541735 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_be26bfeb-e0f8-4c67-8938-55d8399b717c/galera/0.log"
Nov 24 09:34:53 crc kubenswrapper[4691]: I1124 09:34:53.719567 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_86907013-52ae-4aeb-a697-6066cfdbebde/openstackclient/0.log"
Nov 24 09:34:53 crc kubenswrapper[4691]: I1124 09:34:53.784987 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-jknmq_204a8833-cf7b-491a-b06a-0c983a6aa30a/ovn-controller/0.log"
Nov 24 09:34:54 crc kubenswrapper[4691]: I1124 09:34:54.026132 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-hs48v_16f9ca32-c0b3-4269-af05-a68a6d21269b/openstack-network-exporter/0.log"
Nov 24 09:34:54 crc kubenswrapper[4691]: I1124 09:34:54.181161 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pkx2n_8f3c496c-e0d1-4b16-80e9-fd3c10dacf79/ovsdb-server-init/0.log"
Nov 24 09:34:54 crc kubenswrapper[4691]: I1124 09:34:54.564587 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pkx2n_8f3c496c-e0d1-4b16-80e9-fd3c10dacf79/ovsdb-server-init/0.log"
Nov 24 09:34:54 crc kubenswrapper[4691]: I1124 09:34:54.587558 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pkx2n_8f3c496c-e0d1-4b16-80e9-fd3c10dacf79/ovs-vswitchd/0.log"
Nov 24 09:34:54 crc kubenswrapper[4691]: I1124 09:34:54.637526 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pkx2n_8f3c496c-e0d1-4b16-80e9-fd3c10dacf79/ovsdb-server/0.log"
Nov 24 09:34:54 crc kubenswrapper[4691]: I1124 09:34:54.821643 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-zqkwb_7e5ff956-3876-4e10-bd7d-aa8c91fc7bd0/ovn-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:55 crc kubenswrapper[4691]: I1124 09:34:55.012532 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4897f50d-627f-434b-a0d8-84854f219509/ovn-northd/0.log"
Nov 24 09:34:55 crc kubenswrapper[4691]: I1124 09:34:55.031590 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_4897f50d-627f-434b-a0d8-84854f219509/openstack-network-exporter/0.log"
Nov 24 09:34:55 crc kubenswrapper[4691]: I1124 09:34:55.201902 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_7d275bbe-d927-40c6-83b6-ad6da7f2a83c/openstack-network-exporter/0.log"
Nov 24 09:34:55 crc kubenswrapper[4691]: I1124 09:34:55.274278 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_7d275bbe-d927-40c6-83b6-ad6da7f2a83c/ovsdbserver-nb/0.log"
Nov 24 09:34:55 crc kubenswrapper[4691]: I1124 09:34:55.375287 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_57b5f932-160d-453a-ad0b-2b111085fda8/openstack-network-exporter/0.log"
Nov 24 09:34:55 crc kubenswrapper[4691]: I1124 09:34:55.484467 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_57b5f932-160d-453a-ad0b-2b111085fda8/ovsdbserver-sb/0.log"
Nov 24 09:34:55 crc kubenswrapper[4691]: I1124 09:34:55.552213 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_816aeaf6-40c5-4859-b819-bcfb46750549/nova-metadata-metadata/0.log"
Nov 24 09:34:55 crc kubenswrapper[4691]: I1124 09:34:55.863328 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_4e65164c-c11a-4774-808c-f0dbdf7f9ffa/setup-container/0.log"
Nov 24 09:34:55 crc kubenswrapper[4691]: I1124 09:34:55.940914 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-77fc7f8568-9mx5z_acaed1f5-7a77-46a1-936d-e0fa2a02767b/placement-api/0.log"
Nov 24 09:34:56 crc kubenswrapper[4691]: I1124 09:34:56.028165 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_4e65164c-c11a-4774-808c-f0dbdf7f9ffa/setup-container/0.log"
Nov 24 09:34:56 crc kubenswrapper[4691]: I1124 09:34:56.029143 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-77fc7f8568-9mx5z_acaed1f5-7a77-46a1-936d-e0fa2a02767b/placement-log/0.log"
Nov 24 09:34:56 crc kubenswrapper[4691]: I1124 09:34:56.081827 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_4e65164c-c11a-4774-808c-f0dbdf7f9ffa/rabbitmq/0.log"
Nov 24 09:34:56 crc kubenswrapper[4691]: I1124 09:34:56.304208 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_19b40ace-19bb-41b3-8b25-f93691331766/setup-container/0.log"
Nov 24 09:34:56 crc kubenswrapper[4691]: I1124 09:34:56.506222 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_19b40ace-19bb-41b3-8b25-f93691331766/setup-container/0.log"
Nov 24 09:34:56 crc kubenswrapper[4691]: I1124 09:34:56.515427 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_19b40ace-19bb-41b3-8b25-f93691331766/rabbitmq/0.log"
Nov 24 09:34:56 crc kubenswrapper[4691]: I1124 09:34:56.562137 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-d5v6d_d9a70a19-1e34-4bf7-8b91-ed6df2838313/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:56 crc kubenswrapper[4691]: I1124 09:34:56.807085 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-mjvzd_7b0cd66f-4531-45fd-aea8-00726f118662/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:56 crc kubenswrapper[4691]: I1124 09:34:56.807526 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-ncxlx_4e07a8ba-4deb-45cb-8ecd-423300eadb7a/redhat-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.024698 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-6tlcl_c5c8e953-d111-42cd-8930-ee2c8f4242dd/ssh-known-hosts-edpm-deployment/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.092766 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-fgbtw_c306f14b-da97-42e1-87cc-612779e690e7/run-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.279629 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6bf54cf5bc-7wgwz_9a9213e2-4a1f-4d15-ab02-472c467babfe/proxy-server/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.496288 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6bf54cf5bc-7wgwz_9a9213e2-4a1f-4d15-ab02-472c467babfe/proxy-httpd/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.518524 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-tzjh7_5c00da32-542e-45b4-837c-67fa08ff49d3/swift-ring-rebalance/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.645787 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/account-auditor/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.701431 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/account-reaper/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.771483 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/account-replicator/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.816406 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/account-server/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.844389 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/container-auditor/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.939083 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/container-replicator/0.log"
Nov 24 09:34:57 crc kubenswrapper[4691]: I1124 09:34:57.989867 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/container-server/0.log"
Nov 24 09:34:58 crc kubenswrapper[4691]: I1124 09:34:58.087178 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/container-updater/0.log"
Nov 24 09:34:58 crc kubenswrapper[4691]: I1124 09:34:58.148495 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/object-auditor/0.log"
Nov 24 09:34:58 crc kubenswrapper[4691]: I1124 09:34:58.180729 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/object-expirer/0.log"
Nov 24 09:34:58 crc kubenswrapper[4691]: I1124 09:34:58.238762 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/object-replicator/0.log"
Nov 24 09:34:58 crc kubenswrapper[4691]: I1124 09:34:58.338366 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/object-server/0.log"
Nov 24 09:34:58 crc kubenswrapper[4691]: I1124 09:34:58.339566 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/object-updater/0.log"
Nov 24 09:34:58 crc kubenswrapper[4691]: I1124 09:34:58.471852 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/swift-recon-cron/0.log"
Nov 24 09:34:58 crc kubenswrapper[4691]: I1124 09:34:58.528750 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_94ab9159-218c-42b9-9c38-8e0701f3eeef/rsync/0.log"
Nov 24 09:34:58 crc kubenswrapper[4691]: I1124 09:34:58.588914 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-kc5jp_b19b3af1-e299-46ab-b579-902390cb75a3/telemetry-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:34:58 crc kubenswrapper[4691]: I1124 09:34:58.711880 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-zbkq4_fb487f8d-8df8-4b2d-9b08-647a942d8559/validate-network-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 24 09:35:01 crc kubenswrapper[4691]: I1124 09:35:01.312808 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_cb7ce1da-e87a-4d10-b6ad-f9f2e0d022b4/memcached/0.log"
Nov 24 09:35:03 crc kubenswrapper[4691]: I1124 09:35:03.760808 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c"
Nov 24 09:35:03 crc kubenswrapper[4691]: E1124 09:35:03.762009 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 09:35:17 crc kubenswrapper[4691]: I1124 09:35:17.761222 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c"
Nov 24 09:35:17 crc kubenswrapper[4691]: E1124 09:35:17.762037 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93"
Nov 24 09:35:19 crc kubenswrapper[4691]: I1124 09:35:19.781891 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/util/0.log"
Nov 24 09:35:19 crc kubenswrapper[4691]: I1124 09:35:19.954903 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/pull/0.log"
Nov 24 09:35:19 crc kubenswrapper[4691]: I1124 09:35:19.959589 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/util/0.log"
Nov 24 09:35:19 crc kubenswrapper[4691]: I1124 09:35:19.972195 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/pull/0.log"
Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.135178 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/extract/0.log"
Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.154667 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/util/0.log"
Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.160603 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0cd1699c2f540ed7a134d30d904a84ee6b82b2d30ac5ca491801f6b47cct8kw_d08a87e6-8ffb-408e-86d2-ff4994f07ed9/pull/0.log"
Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.302755 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-ppdhs_22fec998-136d-4bc0-9db1-1e4ac6e1107c/kube-rbac-proxy/0.log"
Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.358609 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-ppdhs_22fec998-136d-4bc0-9db1-1e4ac6e1107c/manager/0.log"
Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.364540 4691 log.go:25] "Finished parsing log file"
path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-6jgx4_132ed997-05f1-4484-a11a-3e282b0e889b/kube-rbac-proxy/0.log" Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.521857 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-ncq2x_24f62db2-c526-493e-a703-43a661ea0228/kube-rbac-proxy/0.log" Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.551383 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-6jgx4_132ed997-05f1-4484-a11a-3e282b0e889b/manager/0.log" Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.586363 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-ncq2x_24f62db2-c526-493e-a703-43a661ea0228/manager/0.log" Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.735836 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-f7g9v_bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e/kube-rbac-proxy/0.log" Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.896100 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-f7g9v_bc63ea3b-7e56-46cc-b2e9-2ff7b0cddc2e/manager/0.log" Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.919408 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-cql69_df3746c8-ec8b-406e-b2f5-7bd93dd46646/kube-rbac-proxy/0.log" Nov 24 09:35:20 crc kubenswrapper[4691]: I1124 09:35:20.949391 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-cql69_df3746c8-ec8b-406e-b2f5-7bd93dd46646/manager/0.log" Nov 24 09:35:21 crc kubenswrapper[4691]: I1124 09:35:21.099773 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-jctfk_39df322c-3527-4b0d-a719-4ecbfa944a56/kube-rbac-proxy/0.log" Nov 24 09:35:21 crc kubenswrapper[4691]: I1124 09:35:21.172055 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-jctfk_39df322c-3527-4b0d-a719-4ecbfa944a56/manager/0.log" Nov 24 09:35:21 crc kubenswrapper[4691]: I1124 09:35:21.240864 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-v7vtk_7e82629b-ee44-488b-bdd3-58f078070f7e/kube-rbac-proxy/0.log" Nov 24 09:35:21 crc kubenswrapper[4691]: I1124 09:35:21.413298 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-v7vtk_7e82629b-ee44-488b-bdd3-58f078070f7e/manager/0.log" Nov 24 09:35:21 crc kubenswrapper[4691]: I1124 09:35:21.429272 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-nfx6g_f8a9119f-fc7e-4bb6-89da-91f7655c633d/kube-rbac-proxy/0.log" Nov 24 09:35:21 crc kubenswrapper[4691]: I1124 09:35:21.449075 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-nfx6g_f8a9119f-fc7e-4bb6-89da-91f7655c633d/manager/0.log" Nov 24 09:35:21 crc kubenswrapper[4691]: I1124 09:35:21.639745 4691 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-2w275_be284da4-49c2-4967-a810-eb5dbece93a3/kube-rbac-proxy/0.log" Nov 24 09:35:21 crc kubenswrapper[4691]: I1124 09:35:21.690959 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-2w275_be284da4-49c2-4967-a810-eb5dbece93a3/manager/0.log" Nov 24 09:35:21 crc kubenswrapper[4691]: I1124 09:35:21.813510 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-clqqr_c2acb14d-547e-4528-addc-5bb388370b04/kube-rbac-proxy/0.log" Nov 24 09:35:21 crc kubenswrapper[4691]: I1124 09:35:21.871345 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-clqqr_c2acb14d-547e-4528-addc-5bb388370b04/manager/0.log" Nov 24 09:35:21 crc kubenswrapper[4691]: I1124 09:35:21.913606 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-vnlb4_f4138dbf-cfaf-4a82-bf69-d6065584d1ba/kube-rbac-proxy/0.log" Nov 24 09:35:22 crc kubenswrapper[4691]: I1124 09:35:22.020477 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-vnlb4_f4138dbf-cfaf-4a82-bf69-d6065584d1ba/manager/0.log" Nov 24 09:35:22 crc kubenswrapper[4691]: I1124 09:35:22.107164 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-8tqbw_66685e8a-e196-444b-9149-e7861ff2c8b5/kube-rbac-proxy/0.log" Nov 24 09:35:22 crc kubenswrapper[4691]: I1124 09:35:22.137223 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-8tqbw_66685e8a-e196-444b-9149-e7861ff2c8b5/manager/0.log" Nov 24 09:35:22 crc kubenswrapper[4691]: I1124 09:35:22.295224 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-bh7th_0eb9999f-a946-4946-83e0-6cbf7be82741/kube-rbac-proxy/0.log" Nov 24 09:35:22 crc kubenswrapper[4691]: I1124 09:35:22.384058 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-bh7th_0eb9999f-a946-4946-83e0-6cbf7be82741/manager/0.log" Nov 24 09:35:22 crc kubenswrapper[4691]: I1124 09:35:22.452103 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-sr8nk_1c460dd6-5f3d-4eae-9436-c46ccd900674/kube-rbac-proxy/0.log" Nov 24 09:35:22 crc kubenswrapper[4691]: I1124 09:35:22.484053 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-sr8nk_1c460dd6-5f3d-4eae-9436-c46ccd900674/manager/0.log" Nov 24 09:35:22 crc kubenswrapper[4691]: I1124 09:35:22.568550 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d_b9f37eec-f8fc-4083-b29a-4e704c802c8a/kube-rbac-proxy/0.log" Nov 24 09:35:22 crc kubenswrapper[4691]: I1124 09:35:22.629417 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-rdr7d_b9f37eec-f8fc-4083-b29a-4e704c802c8a/manager/0.log" Nov 24 09:35:22 crc kubenswrapper[4691]: I1124 
09:35:22.930477 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-54cb99d74c-jrmkn_2aa0febc-e96d-419c-855c-bae0db1c6d11/operator/0.log" Nov 24 09:35:23 crc kubenswrapper[4691]: I1124 09:35:23.087855 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-4ppc7_c57ebb8d-e8cb-4e4a-af63-e79986c327a5/registry-server/0.log" Nov 24 09:35:23 crc kubenswrapper[4691]: I1124 09:35:23.149646 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-554b4f8994-dck8w_f46c7222-cbb0-457d-bb11-15d8cb855c8b/kube-rbac-proxy/0.log" Nov 24 09:35:23 crc kubenswrapper[4691]: I1124 09:35:23.332194 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-kp2bb_63c87b6f-c210-4837-bde9-87436a88578f/kube-rbac-proxy/0.log" Nov 24 09:35:23 crc kubenswrapper[4691]: I1124 09:35:23.341641 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-554b4f8994-dck8w_f46c7222-cbb0-457d-bb11-15d8cb855c8b/manager/0.log" Nov 24 09:35:23 crc kubenswrapper[4691]: I1124 09:35:23.552364 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-kp2bb_63c87b6f-c210-4837-bde9-87436a88578f/manager/0.log" Nov 24 09:35:23 crc kubenswrapper[4691]: I1124 09:35:23.559498 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-65lbw_f3bb505d-02c4-49ec-94c5-a349cb5a4468/operator/0.log" Nov 24 09:35:23 crc kubenswrapper[4691]: I1124 09:35:23.875574 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7888ffcffd-8jst5_603e76a3-8258-43ec-850b-d2c34845cd8b/manager/0.log" Nov 24 09:35:24 crc kubenswrapper[4691]: I1124 09:35:24.139290 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-wmpvm_c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7/kube-rbac-proxy/0.log" Nov 24 09:35:24 crc kubenswrapper[4691]: I1124 09:35:24.147686 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-wmpvm_c7e06db1-dbe0-48c4-ba25-ef962e6cd3d7/manager/0.log" Nov 24 09:35:24 crc kubenswrapper[4691]: I1124 09:35:24.161011 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-sn2x6_0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2/kube-rbac-proxy/0.log" Nov 24 09:35:24 crc kubenswrapper[4691]: I1124 09:35:24.260023 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-sn2x6_0ed3c5b9-6275-4aa7-9f4c-a5e7ae6404f2/manager/0.log" Nov 24 09:35:24 crc kubenswrapper[4691]: I1124 09:35:24.434497 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-4czcs_ccc21638-592f-4e4f-87df-f95f79a5c23e/manager/0.log" Nov 24 09:35:24 crc kubenswrapper[4691]: I1124 09:35:24.439226 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-4czcs_ccc21638-592f-4e4f-87df-f95f79a5c23e/kube-rbac-proxy/0.log" Nov 24 09:35:24 crc kubenswrapper[4691]: I1124 
09:35:24.523181 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-8qh9m_345576fd-a4cd-4c76-8c81-3669a42be294/kube-rbac-proxy/0.log" Nov 24 09:35:24 crc kubenswrapper[4691]: I1124 09:35:24.587983 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-8qh9m_345576fd-a4cd-4c76-8c81-3669a42be294/manager/0.log" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.198368 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ds6q2"] Nov 24 09:35:28 crc kubenswrapper[4691]: E1124 09:35:28.199272 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="950621d8-ef6c-46c3-a427-311404acecd6" containerName="extract-content" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.199285 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="950621d8-ef6c-46c3-a427-311404acecd6" containerName="extract-content" Nov 24 09:35:28 crc kubenswrapper[4691]: E1124 09:35:28.199328 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="950621d8-ef6c-46c3-a427-311404acecd6" containerName="registry-server" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.199334 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="950621d8-ef6c-46c3-a427-311404acecd6" containerName="registry-server" Nov 24 09:35:28 crc kubenswrapper[4691]: E1124 09:35:28.199351 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="950621d8-ef6c-46c3-a427-311404acecd6" containerName="extract-utilities" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.199358 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="950621d8-ef6c-46c3-a427-311404acecd6" containerName="extract-utilities" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.199562 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="950621d8-ef6c-46c3-a427-311404acecd6" containerName="registry-server" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.201293 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.212423 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ds6q2"] Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.276736 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-utilities\") pod \"certified-operators-ds6q2\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.276808 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jq68\" (UniqueName: \"kubernetes.io/projected/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-kube-api-access-7jq68\") pod \"certified-operators-ds6q2\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.276837 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-catalog-content\") pod \"certified-operators-ds6q2\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.378768 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-utilities\") pod \"certified-operators-ds6q2\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.378841 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jq68\" (UniqueName: \"kubernetes.io/projected/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-kube-api-access-7jq68\") pod \"certified-operators-ds6q2\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.378860 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-catalog-content\") pod \"certified-operators-ds6q2\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.379232 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-utilities\") pod \"certified-operators-ds6q2\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.379282 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-catalog-content\") pod \"certified-operators-ds6q2\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.396795 4691 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7jq68\" (UniqueName: \"kubernetes.io/projected/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-kube-api-access-7jq68\") pod \"certified-operators-ds6q2\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:28 crc kubenswrapper[4691]: I1124 09:35:28.529137 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:29 crc kubenswrapper[4691]: I1124 09:35:29.120126 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ds6q2"] Nov 24 09:35:29 crc kubenswrapper[4691]: I1124 09:35:29.986862 4691 generic.go:334] "Generic (PLEG): container finished" podID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" containerID="b8cf6684a541524253cef78ae89ec344c43e86852c8c0f067d6450574ba6fa0e" exitCode=0 Nov 24 09:35:29 crc kubenswrapper[4691]: I1124 09:35:29.987043 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds6q2" event={"ID":"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6","Type":"ContainerDied","Data":"b8cf6684a541524253cef78ae89ec344c43e86852c8c0f067d6450574ba6fa0e"} Nov 24 09:35:29 crc kubenswrapper[4691]: I1124 09:35:29.987168 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds6q2" event={"ID":"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6","Type":"ContainerStarted","Data":"99998ba0addedadd5f0fedb314885f0b1dc1915f90e8a0a09a033492e5e0b620"} Nov 24 09:35:30 crc kubenswrapper[4691]: I1124 09:35:30.997485 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds6q2" event={"ID":"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6","Type":"ContainerStarted","Data":"2606f7c309dbf4dda30bec6019b1a76e29592e9dc18555f93b1a70e0cc4ef66f"} Nov 24 09:35:31 crc kubenswrapper[4691]: I1124 09:35:31.762484 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:35:31 crc kubenswrapper[4691]: E1124 09:35:31.762977 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:35:32 crc kubenswrapper[4691]: I1124 09:35:32.006620 4691 generic.go:334] "Generic (PLEG): container finished" podID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" containerID="2606f7c309dbf4dda30bec6019b1a76e29592e9dc18555f93b1a70e0cc4ef66f" exitCode=0 Nov 24 09:35:32 crc kubenswrapper[4691]: I1124 09:35:32.006666 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds6q2" event={"ID":"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6","Type":"ContainerDied","Data":"2606f7c309dbf4dda30bec6019b1a76e29592e9dc18555f93b1a70e0cc4ef66f"} Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.000691 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fdvg4"] Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.003715 4691 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.023805 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fdvg4"] Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.046414 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds6q2" event={"ID":"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6","Type":"ContainerStarted","Data":"36af7b3ee50bab4468d9a0379b56bf78b9902e0b49824ac1a7879311cc55d805"} Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.060587 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-utilities\") pod \"redhat-operators-fdvg4\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.060704 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6d777\" (UniqueName: \"kubernetes.io/projected/85281ab7-1522-4837-9b58-5ed9e7603bcf-kube-api-access-6d777\") pod \"redhat-operators-fdvg4\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.060741 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-catalog-content\") pod \"redhat-operators-fdvg4\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.073386 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ds6q2" podStartSLOduration=2.39650608 podStartE2EDuration="5.073368694s" podCreationTimestamp="2025-11-24 09:35:28 +0000 UTC" firstStartedPulling="2025-11-24 09:35:29.988440546 +0000 UTC m=+5891.987389795" lastFinishedPulling="2025-11-24 09:35:32.66530315 +0000 UTC m=+5894.664252409" observedRunningTime="2025-11-24 09:35:33.067383043 +0000 UTC m=+5895.066332302" watchObservedRunningTime="2025-11-24 09:35:33.073368694 +0000 UTC m=+5895.072317943" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.162270 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6d777\" (UniqueName: \"kubernetes.io/projected/85281ab7-1522-4837-9b58-5ed9e7603bcf-kube-api-access-6d777\") pod \"redhat-operators-fdvg4\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.162340 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-catalog-content\") pod \"redhat-operators-fdvg4\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.162441 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-utilities\") pod \"redhat-operators-fdvg4\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " 
pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.162997 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-catalog-content\") pod \"redhat-operators-fdvg4\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.163015 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-utilities\") pod \"redhat-operators-fdvg4\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.188976 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6d777\" (UniqueName: \"kubernetes.io/projected/85281ab7-1522-4837-9b58-5ed9e7603bcf-kube-api-access-6d777\") pod \"redhat-operators-fdvg4\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.327009 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:33 crc kubenswrapper[4691]: I1124 09:35:33.845972 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fdvg4"] Nov 24 09:35:33 crc kubenswrapper[4691]: W1124 09:35:33.849923 4691 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85281ab7_1522_4837_9b58_5ed9e7603bcf.slice/crio-abd1082a21fdc45b87b2c28cc1b976db155856b28a3dbedfe6f51df663a2b2a2 WatchSource:0}: Error finding container abd1082a21fdc45b87b2c28cc1b976db155856b28a3dbedfe6f51df663a2b2a2: Status 404 returned error can't find the container with id abd1082a21fdc45b87b2c28cc1b976db155856b28a3dbedfe6f51df663a2b2a2 Nov 24 09:35:34 crc kubenswrapper[4691]: I1124 09:35:34.061018 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdvg4" event={"ID":"85281ab7-1522-4837-9b58-5ed9e7603bcf","Type":"ContainerStarted","Data":"abd1082a21fdc45b87b2c28cc1b976db155856b28a3dbedfe6f51df663a2b2a2"} Nov 24 09:35:35 crc kubenswrapper[4691]: I1124 09:35:35.072010 4691 generic.go:334] "Generic (PLEG): container finished" podID="85281ab7-1522-4837-9b58-5ed9e7603bcf" containerID="8c5e8830db1c6da8dfb612f591e7797645006b29ab3bf34b367bd97387f6cbeb" exitCode=0 Nov 24 09:35:35 crc kubenswrapper[4691]: I1124 09:35:35.072207 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdvg4" event={"ID":"85281ab7-1522-4837-9b58-5ed9e7603bcf","Type":"ContainerDied","Data":"8c5e8830db1c6da8dfb612f591e7797645006b29ab3bf34b367bd97387f6cbeb"} Nov 24 09:35:36 crc kubenswrapper[4691]: I1124 09:35:36.087092 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdvg4" event={"ID":"85281ab7-1522-4837-9b58-5ed9e7603bcf","Type":"ContainerStarted","Data":"a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77"} Nov 24 09:35:38 crc kubenswrapper[4691]: I1124 09:35:38.105886 4691 generic.go:334] "Generic (PLEG): container finished" podID="85281ab7-1522-4837-9b58-5ed9e7603bcf" 
containerID="a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77" exitCode=0 Nov 24 09:35:38 crc kubenswrapper[4691]: I1124 09:35:38.106050 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdvg4" event={"ID":"85281ab7-1522-4837-9b58-5ed9e7603bcf","Type":"ContainerDied","Data":"a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77"} Nov 24 09:35:38 crc kubenswrapper[4691]: I1124 09:35:38.530392 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:38 crc kubenswrapper[4691]: I1124 09:35:38.530923 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:38 crc kubenswrapper[4691]: I1124 09:35:38.578239 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:39 crc kubenswrapper[4691]: I1124 09:35:39.177295 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:40 crc kubenswrapper[4691]: I1124 09:35:40.136032 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdvg4" event={"ID":"85281ab7-1522-4837-9b58-5ed9e7603bcf","Type":"ContainerStarted","Data":"e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749"} Nov 24 09:35:40 crc kubenswrapper[4691]: I1124 09:35:40.157075 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fdvg4" podStartSLOduration=4.175811039 podStartE2EDuration="8.157054717s" podCreationTimestamp="2025-11-24 09:35:32 +0000 UTC" firstStartedPulling="2025-11-24 09:35:35.074511862 +0000 UTC m=+5897.073461111" lastFinishedPulling="2025-11-24 09:35:39.05575553 +0000 UTC m=+5901.054704789" observedRunningTime="2025-11-24 09:35:40.150652765 +0000 UTC m=+5902.149602024" watchObservedRunningTime="2025-11-24 09:35:40.157054717 +0000 UTC m=+5902.156003966" Nov 24 09:35:41 crc kubenswrapper[4691]: I1124 09:35:41.391385 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ds6q2"] Nov 24 09:35:41 crc kubenswrapper[4691]: I1124 09:35:41.790148 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-4npq9_d7748b2b-46c9-4709-bb46-545d8209bb5f/control-plane-machine-set-operator/0.log" Nov 24 09:35:42 crc kubenswrapper[4691]: I1124 09:35:42.006223 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-m8bj7_12724cb5-e0ed-4c92-93e6-0f223dd11bea/kube-rbac-proxy/0.log" Nov 24 09:35:42 crc kubenswrapper[4691]: I1124 09:35:42.054755 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-m8bj7_12724cb5-e0ed-4c92-93e6-0f223dd11bea/machine-api-operator/0.log" Nov 24 09:35:42 crc kubenswrapper[4691]: I1124 09:35:42.152812 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ds6q2" podUID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" containerName="registry-server" containerID="cri-o://36af7b3ee50bab4468d9a0379b56bf78b9902e0b49824ac1a7879311cc55d805" gracePeriod=2 Nov 24 09:35:43 crc kubenswrapper[4691]: I1124 09:35:43.327821 4691 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:43 crc kubenswrapper[4691]: I1124 09:35:43.328183 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.170425 4691 generic.go:334] "Generic (PLEG): container finished" podID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" containerID="36af7b3ee50bab4468d9a0379b56bf78b9902e0b49824ac1a7879311cc55d805" exitCode=0 Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.170490 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds6q2" event={"ID":"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6","Type":"ContainerDied","Data":"36af7b3ee50bab4468d9a0379b56bf78b9902e0b49824ac1a7879311cc55d805"} Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.386226 4691 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fdvg4" podUID="85281ab7-1522-4837-9b58-5ed9e7603bcf" containerName="registry-server" probeResult="failure" output=< Nov 24 09:35:44 crc kubenswrapper[4691]: timeout: failed to connect service ":50051" within 1s Nov 24 09:35:44 crc kubenswrapper[4691]: > Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.703743 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.814535 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-catalog-content\") pod \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.814984 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-utilities\") pod \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.815034 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jq68\" (UniqueName: \"kubernetes.io/projected/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-kube-api-access-7jq68\") pod \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\" (UID: \"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6\") " Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.816037 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-utilities" (OuterVolumeSpecName: "utilities") pod "8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" (UID: "8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.823103 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-kube-api-access-7jq68" (OuterVolumeSpecName: "kube-api-access-7jq68") pod "8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" (UID: "8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6"). InnerVolumeSpecName "kube-api-access-7jq68". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.871702 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" (UID: "8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.917462 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.917529 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jq68\" (UniqueName: \"kubernetes.io/projected/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-kube-api-access-7jq68\") on node \"crc\" DevicePath \"\"" Nov 24 09:35:44 crc kubenswrapper[4691]: I1124 09:35:44.917544 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:35:45 crc kubenswrapper[4691]: I1124 09:35:45.181187 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds6q2" event={"ID":"8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6","Type":"ContainerDied","Data":"99998ba0addedadd5f0fedb314885f0b1dc1915f90e8a0a09a033492e5e0b620"} Nov 24 09:35:45 crc kubenswrapper[4691]: I1124 09:35:45.181275 4691 scope.go:117] "RemoveContainer" containerID="36af7b3ee50bab4468d9a0379b56bf78b9902e0b49824ac1a7879311cc55d805" Nov 24 09:35:45 crc kubenswrapper[4691]: I1124 09:35:45.181277 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ds6q2" Nov 24 09:35:45 crc kubenswrapper[4691]: I1124 09:35:45.202325 4691 scope.go:117] "RemoveContainer" containerID="2606f7c309dbf4dda30bec6019b1a76e29592e9dc18555f93b1a70e0cc4ef66f" Nov 24 09:35:45 crc kubenswrapper[4691]: I1124 09:35:45.225264 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ds6q2"] Nov 24 09:35:45 crc kubenswrapper[4691]: I1124 09:35:45.234802 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ds6q2"] Nov 24 09:35:45 crc kubenswrapper[4691]: I1124 09:35:45.239369 4691 scope.go:117] "RemoveContainer" containerID="b8cf6684a541524253cef78ae89ec344c43e86852c8c0f067d6450574ba6fa0e" Nov 24 09:35:45 crc kubenswrapper[4691]: I1124 09:35:45.760894 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:35:45 crc kubenswrapper[4691]: E1124 09:35:45.761404 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:35:46 crc kubenswrapper[4691]: I1124 09:35:46.774853 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" path="/var/lib/kubelet/pods/8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6/volumes" Nov 24 09:35:53 crc kubenswrapper[4691]: I1124 09:35:53.379627 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:53 crc kubenswrapper[4691]: I1124 09:35:53.440681 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:53 crc kubenswrapper[4691]: I1124 09:35:53.622801 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fdvg4"] Nov 24 09:35:53 crc kubenswrapper[4691]: I1124 09:35:53.885408 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-58b88_345048fa-fc45-40c3-bd90-e517c3594a2a/cert-manager-controller/0.log" Nov 24 09:35:54 crc kubenswrapper[4691]: I1124 09:35:54.073321 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-5rltx_78886f3b-0708-4e26-bc7d-ade51d1b3e9c/cert-manager-cainjector/0.log" Nov 24 09:35:54 crc kubenswrapper[4691]: I1124 09:35:54.126857 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-lmkrt_40cf1922-077a-482f-9ffa-7dd636da29ef/cert-manager-webhook/0.log" Nov 24 09:35:55 crc kubenswrapper[4691]: I1124 09:35:55.272701 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fdvg4" podUID="85281ab7-1522-4837-9b58-5ed9e7603bcf" containerName="registry-server" containerID="cri-o://e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749" gracePeriod=2 Nov 24 09:35:55 crc kubenswrapper[4691]: I1124 09:35:55.752904 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:55 crc kubenswrapper[4691]: I1124 09:35:55.817054 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-utilities\") pod \"85281ab7-1522-4837-9b58-5ed9e7603bcf\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " Nov 24 09:35:55 crc kubenswrapper[4691]: I1124 09:35:55.817183 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6d777\" (UniqueName: \"kubernetes.io/projected/85281ab7-1522-4837-9b58-5ed9e7603bcf-kube-api-access-6d777\") pod \"85281ab7-1522-4837-9b58-5ed9e7603bcf\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " Nov 24 09:35:55 crc kubenswrapper[4691]: I1124 09:35:55.817230 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-catalog-content\") pod \"85281ab7-1522-4837-9b58-5ed9e7603bcf\" (UID: \"85281ab7-1522-4837-9b58-5ed9e7603bcf\") " Nov 24 09:35:55 crc kubenswrapper[4691]: I1124 09:35:55.818159 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-utilities" (OuterVolumeSpecName: "utilities") pod "85281ab7-1522-4837-9b58-5ed9e7603bcf" (UID: "85281ab7-1522-4837-9b58-5ed9e7603bcf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:35:55 crc kubenswrapper[4691]: I1124 09:35:55.818268 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:35:55 crc kubenswrapper[4691]: I1124 09:35:55.825470 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85281ab7-1522-4837-9b58-5ed9e7603bcf-kube-api-access-6d777" (OuterVolumeSpecName: "kube-api-access-6d777") pod "85281ab7-1522-4837-9b58-5ed9e7603bcf" (UID: "85281ab7-1522-4837-9b58-5ed9e7603bcf"). InnerVolumeSpecName "kube-api-access-6d777". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:35:55 crc kubenswrapper[4691]: I1124 09:35:55.910160 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "85281ab7-1522-4837-9b58-5ed9e7603bcf" (UID: "85281ab7-1522-4837-9b58-5ed9e7603bcf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:35:55 crc kubenswrapper[4691]: I1124 09:35:55.921004 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6d777\" (UniqueName: \"kubernetes.io/projected/85281ab7-1522-4837-9b58-5ed9e7603bcf-kube-api-access-6d777\") on node \"crc\" DevicePath \"\"" Nov 24 09:35:55 crc kubenswrapper[4691]: I1124 09:35:55.921054 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85281ab7-1522-4837-9b58-5ed9e7603bcf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.283185 4691 generic.go:334] "Generic (PLEG): container finished" podID="85281ab7-1522-4837-9b58-5ed9e7603bcf" containerID="e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749" exitCode=0 Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.283235 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdvg4" event={"ID":"85281ab7-1522-4837-9b58-5ed9e7603bcf","Type":"ContainerDied","Data":"e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749"} Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.283250 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fdvg4" Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.283270 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fdvg4" event={"ID":"85281ab7-1522-4837-9b58-5ed9e7603bcf","Type":"ContainerDied","Data":"abd1082a21fdc45b87b2c28cc1b976db155856b28a3dbedfe6f51df663a2b2a2"} Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.283295 4691 scope.go:117] "RemoveContainer" containerID="e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749" Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.305744 4691 scope.go:117] "RemoveContainer" containerID="a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77" Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.318040 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fdvg4"] Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.325714 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fdvg4"] Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.340223 4691 scope.go:117] "RemoveContainer" containerID="8c5e8830db1c6da8dfb612f591e7797645006b29ab3bf34b367bd97387f6cbeb" Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.383072 4691 scope.go:117] "RemoveContainer" containerID="e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749" Nov 24 09:35:56 crc kubenswrapper[4691]: E1124 09:35:56.383920 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749\": container with ID starting with e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749 not found: ID does not exist" containerID="e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749" Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.383958 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749"} err="failed to get container status \"e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749\": 
rpc error: code = NotFound desc = could not find container \"e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749\": container with ID starting with e21e73bbe0790d7366a6dba8be970a534735c91e887b67f9a64f6ffc908e7749 not found: ID does not exist" Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.383982 4691 scope.go:117] "RemoveContainer" containerID="a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77" Nov 24 09:35:56 crc kubenswrapper[4691]: E1124 09:35:56.384425 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77\": container with ID starting with a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77 not found: ID does not exist" containerID="a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77" Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.384472 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77"} err="failed to get container status \"a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77\": rpc error: code = NotFound desc = could not find container \"a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77\": container with ID starting with a2e4467718a51ff5615a335b179631be6b2bec47229c36e0017e84b04bc4ce77 not found: ID does not exist" Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.384496 4691 scope.go:117] "RemoveContainer" containerID="8c5e8830db1c6da8dfb612f591e7797645006b29ab3bf34b367bd97387f6cbeb" Nov 24 09:35:56 crc kubenswrapper[4691]: E1124 09:35:56.384814 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c5e8830db1c6da8dfb612f591e7797645006b29ab3bf34b367bd97387f6cbeb\": container with ID starting with 8c5e8830db1c6da8dfb612f591e7797645006b29ab3bf34b367bd97387f6cbeb not found: ID does not exist" containerID="8c5e8830db1c6da8dfb612f591e7797645006b29ab3bf34b367bd97387f6cbeb" Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.384842 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c5e8830db1c6da8dfb612f591e7797645006b29ab3bf34b367bd97387f6cbeb"} err="failed to get container status \"8c5e8830db1c6da8dfb612f591e7797645006b29ab3bf34b367bd97387f6cbeb\": rpc error: code = NotFound desc = could not find container \"8c5e8830db1c6da8dfb612f591e7797645006b29ab3bf34b367bd97387f6cbeb\": container with ID starting with 8c5e8830db1c6da8dfb612f591e7797645006b29ab3bf34b367bd97387f6cbeb not found: ID does not exist" Nov 24 09:35:56 crc kubenswrapper[4691]: I1124 09:35:56.777042 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85281ab7-1522-4837-9b58-5ed9e7603bcf" path="/var/lib/kubelet/pods/85281ab7-1522-4837-9b58-5ed9e7603bcf/volumes" Nov 24 09:35:57 crc kubenswrapper[4691]: I1124 09:35:57.760438 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:35:57 crc kubenswrapper[4691]: E1124 09:35:57.761214 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:36:05 crc kubenswrapper[4691]: I1124 09:36:05.578142 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-nrlp6_8a7fc372-f01f-497b-b1bd-c508371d6069/nmstate-console-plugin/0.log" Nov 24 09:36:05 crc kubenswrapper[4691]: I1124 09:36:05.799023 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-sk5rc_17c32358-060b-4f32-abec-0eac2e40eca1/nmstate-handler/0.log" Nov 24 09:36:05 crc kubenswrapper[4691]: I1124 09:36:05.799721 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fjpwj_6fc26c17-4027-42aa-821e-b3e5c1f92226/nmstate-metrics/0.log" Nov 24 09:36:05 crc kubenswrapper[4691]: I1124 09:36:05.819562 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fjpwj_6fc26c17-4027-42aa-821e-b3e5c1f92226/kube-rbac-proxy/0.log" Nov 24 09:36:06 crc kubenswrapper[4691]: I1124 09:36:06.009269 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-5fxr5_cd1dce5a-f168-4208-879b-f4132bf30307/nmstate-operator/0.log" Nov 24 09:36:06 crc kubenswrapper[4691]: I1124 09:36:06.028148 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-xq22b_d877fe85-0260-4e8f-89c9-ad96a8466bee/nmstate-webhook/0.log" Nov 24 09:36:08 crc kubenswrapper[4691]: I1124 09:36:08.777547 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:36:08 crc kubenswrapper[4691]: E1124 09:36:08.778816 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:36:19 crc kubenswrapper[4691]: I1124 09:36:19.143744 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-wgxjz_8a2bb6bf-c15d-40aa-9af4-b4c55f67acff/kube-rbac-proxy/0.log" Nov 24 09:36:19 crc kubenswrapper[4691]: I1124 09:36:19.313207 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-wgxjz_8a2bb6bf-c15d-40aa-9af4-b4c55f67acff/controller/0.log" Nov 24 09:36:19 crc kubenswrapper[4691]: I1124 09:36:19.397966 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-frr-files/0.log" Nov 24 09:36:19 crc kubenswrapper[4691]: I1124 09:36:19.593396 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-frr-files/0.log" Nov 24 09:36:19 crc kubenswrapper[4691]: I1124 09:36:19.621596 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-reloader/0.log" Nov 24 09:36:19 crc kubenswrapper[4691]: I1124 09:36:19.622694 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-reloader/0.log" Nov 24 09:36:19 crc 
kubenswrapper[4691]: I1124 09:36:19.642710 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-metrics/0.log" Nov 24 09:36:19 crc kubenswrapper[4691]: I1124 09:36:19.760336 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:36:19 crc kubenswrapper[4691]: E1124 09:36:19.760673 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:36:19 crc kubenswrapper[4691]: I1124 09:36:19.835993 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-reloader/0.log" Nov 24 09:36:19 crc kubenswrapper[4691]: I1124 09:36:19.838560 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-metrics/0.log" Nov 24 09:36:19 crc kubenswrapper[4691]: I1124 09:36:19.842404 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-metrics/0.log" Nov 24 09:36:19 crc kubenswrapper[4691]: I1124 09:36:19.844481 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-frr-files/0.log" Nov 24 09:36:20 crc kubenswrapper[4691]: I1124 09:36:20.009864 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-frr-files/0.log" Nov 24 09:36:20 crc kubenswrapper[4691]: I1124 09:36:20.022534 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-reloader/0.log" Nov 24 09:36:20 crc kubenswrapper[4691]: I1124 09:36:20.077144 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/cp-metrics/0.log" Nov 24 09:36:20 crc kubenswrapper[4691]: I1124 09:36:20.102104 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/controller/0.log" Nov 24 09:36:20 crc kubenswrapper[4691]: I1124 09:36:20.274061 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/frr-metrics/0.log" Nov 24 09:36:20 crc kubenswrapper[4691]: I1124 09:36:20.281511 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/kube-rbac-proxy/0.log" Nov 24 09:36:20 crc kubenswrapper[4691]: I1124 09:36:20.301487 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/kube-rbac-proxy-frr/0.log" Nov 24 09:36:20 crc kubenswrapper[4691]: I1124 09:36:20.480783 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/reloader/0.log" Nov 24 09:36:20 crc kubenswrapper[4691]: I1124 09:36:20.526301 4691 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-bbnct_e740c0fa-a972-42fe-8e95-aaed01b46916/frr-k8s-webhook-server/0.log" Nov 24 09:36:20 crc kubenswrapper[4691]: I1124 09:36:20.785100 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-86b4f5566f-xknt4_5c353a67-2f3f-4608-a19e-406c31bae85a/manager/0.log" Nov 24 09:36:20 crc kubenswrapper[4691]: I1124 09:36:20.881296 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-577d7cd9f7-t5x6j_42e2daa4-034f-4fe6-852e-479d1a2570bb/webhook-server/0.log" Nov 24 09:36:21 crc kubenswrapper[4691]: I1124 09:36:21.087536 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-b884p_e76d55fd-c894-4236-8921-8b60a88125f7/kube-rbac-proxy/0.log" Nov 24 09:36:21 crc kubenswrapper[4691]: I1124 09:36:21.622874 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-b884p_e76d55fd-c894-4236-8921-8b60a88125f7/speaker/0.log" Nov 24 09:36:21 crc kubenswrapper[4691]: I1124 09:36:21.903886 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-28hsg_428c03ad-0ec5-4e31-84e1-1a30cba68bc7/frr/0.log" Nov 24 09:36:31 crc kubenswrapper[4691]: I1124 09:36:31.761706 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:36:31 crc kubenswrapper[4691]: E1124 09:36:31.763770 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:36:32 crc kubenswrapper[4691]: I1124 09:36:32.753074 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/util/0.log" Nov 24 09:36:32 crc kubenswrapper[4691]: I1124 09:36:32.979960 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/pull/0.log" Nov 24 09:36:32 crc kubenswrapper[4691]: I1124 09:36:32.982654 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/util/0.log" Nov 24 09:36:33 crc kubenswrapper[4691]: I1124 09:36:33.027351 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/pull/0.log" Nov 24 09:36:33 crc kubenswrapper[4691]: I1124 09:36:33.180663 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/pull/0.log" Nov 24 09:36:33 crc kubenswrapper[4691]: I1124 09:36:33.188242 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/util/0.log" Nov 24 09:36:33 crc kubenswrapper[4691]: 
I1124 09:36:33.215540 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ewnh6l_05d371c2-4e47-4da1-bfc9-b53160c5d377/extract/0.log" Nov 24 09:36:33 crc kubenswrapper[4691]: I1124 09:36:33.356266 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-utilities/0.log" Nov 24 09:36:33 crc kubenswrapper[4691]: I1124 09:36:33.527840 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-content/0.log" Nov 24 09:36:33 crc kubenswrapper[4691]: I1124 09:36:33.531713 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-content/0.log" Nov 24 09:36:33 crc kubenswrapper[4691]: I1124 09:36:33.533788 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-utilities/0.log" Nov 24 09:36:33 crc kubenswrapper[4691]: I1124 09:36:33.741874 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-content/0.log" Nov 24 09:36:33 crc kubenswrapper[4691]: I1124 09:36:33.771932 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/extract-utilities/0.log" Nov 24 09:36:33 crc kubenswrapper[4691]: I1124 09:36:33.965168 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-utilities/0.log" Nov 24 09:36:34 crc kubenswrapper[4691]: I1124 09:36:34.207946 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-utilities/0.log" Nov 24 09:36:34 crc kubenswrapper[4691]: I1124 09:36:34.248061 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-content/0.log" Nov 24 09:36:34 crc kubenswrapper[4691]: I1124 09:36:34.257695 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-content/0.log" Nov 24 09:36:34 crc kubenswrapper[4691]: I1124 09:36:34.484597 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jq477_5a985ac0-8176-499f-86d2-f58210944072/registry-server/0.log" Nov 24 09:36:34 crc kubenswrapper[4691]: I1124 09:36:34.524173 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-content/0.log" Nov 24 09:36:34 crc kubenswrapper[4691]: I1124 09:36:34.552045 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/extract-utilities/0.log" Nov 24 09:36:34 crc kubenswrapper[4691]: I1124 09:36:34.731300 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/util/0.log" 
Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.048958 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/pull/0.log" Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.062213 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/util/0.log" Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.099840 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/pull/0.log" Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.309012 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/extract/0.log" Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.351104 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/pull/0.log" Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.357161 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6h96hd_6a028a1d-8792-4943-bbca-370668d5c1b2/util/0.log" Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.508327 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkkjt_25c29cf2-bd0e-42fa-baa8-a8b2b7ee1409/registry-server/0.log" Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.601208 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wgwxf_74a9daa2-7bfc-487c-9990-9848391da95d/marketplace-operator/0.log" Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.704019 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-utilities/0.log" Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.917159 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-utilities/0.log" Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.952216 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-content/0.log" Nov 24 09:36:35 crc kubenswrapper[4691]: I1124 09:36:35.962428 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-content/0.log" Nov 24 09:36:36 crc kubenswrapper[4691]: I1124 09:36:36.124596 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-content/0.log" Nov 24 09:36:36 crc kubenswrapper[4691]: I1124 09:36:36.142220 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/extract-utilities/0.log" Nov 24 09:36:36 crc kubenswrapper[4691]: I1124 09:36:36.326304 4691 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-utilities/0.log" Nov 24 09:36:36 crc kubenswrapper[4691]: I1124 09:36:36.364327 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ggwnq_e4c460a6-e0d6-48d6-a225-b4e73926b492/registry-server/0.log" Nov 24 09:36:36 crc kubenswrapper[4691]: I1124 09:36:36.508606 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-utilities/0.log" Nov 24 09:36:36 crc kubenswrapper[4691]: I1124 09:36:36.522556 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-content/0.log" Nov 24 09:36:36 crc kubenswrapper[4691]: I1124 09:36:36.539172 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-content/0.log" Nov 24 09:36:36 crc kubenswrapper[4691]: I1124 09:36:36.680496 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-utilities/0.log" Nov 24 09:36:36 crc kubenswrapper[4691]: I1124 09:36:36.680890 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/extract-content/0.log" Nov 24 09:36:36 crc kubenswrapper[4691]: I1124 09:36:36.896884 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-dxnpr_d053d137-22fe-4850-8694-717346625cf6/registry-server/0.log" Nov 24 09:36:42 crc kubenswrapper[4691]: I1124 09:36:42.766653 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:36:42 crc kubenswrapper[4691]: E1124 09:36:42.767508 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:36:54 crc kubenswrapper[4691]: I1124 09:36:54.766372 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:36:54 crc kubenswrapper[4691]: E1124 09:36:54.767235 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:37:07 crc kubenswrapper[4691]: I1124 09:37:07.761315 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:37:07 crc kubenswrapper[4691]: E1124 09:37:07.762283 4691 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-fcwmc_openshift-machine-config-operator(54ccc455-9127-4afd-b3a4-7fc35181bf93)\"" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" Nov 24 09:37:21 crc kubenswrapper[4691]: I1124 09:37:21.760251 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:37:23 crc kubenswrapper[4691]: I1124 09:37:23.041922 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"3d4d370f4b40a169fa9cd55b58a6a65b5bfb32180c07eaee9baf9b0b5956f84f"} Nov 24 09:38:40 crc kubenswrapper[4691]: I1124 09:38:40.815053 4691 generic.go:334] "Generic (PLEG): container finished" podID="0f1728cc-efe7-4b77-9fa5-604056334a1f" containerID="0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5" exitCode=0 Nov 24 09:38:40 crc kubenswrapper[4691]: I1124 09:38:40.815157 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t72qw/must-gather-k4bnj" event={"ID":"0f1728cc-efe7-4b77-9fa5-604056334a1f","Type":"ContainerDied","Data":"0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5"} Nov 24 09:38:40 crc kubenswrapper[4691]: I1124 09:38:40.816221 4691 scope.go:117] "RemoveContainer" containerID="0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5" Nov 24 09:38:41 crc kubenswrapper[4691]: I1124 09:38:41.257579 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-t72qw_must-gather-k4bnj_0f1728cc-efe7-4b77-9fa5-604056334a1f/gather/0.log" Nov 24 09:38:51 crc kubenswrapper[4691]: I1124 09:38:51.951411 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-t72qw/must-gather-k4bnj"] Nov 24 09:38:51 crc kubenswrapper[4691]: I1124 09:38:51.952276 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-t72qw/must-gather-k4bnj" podUID="0f1728cc-efe7-4b77-9fa5-604056334a1f" containerName="copy" containerID="cri-o://b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d" gracePeriod=2 Nov 24 09:38:51 crc kubenswrapper[4691]: I1124 09:38:51.959991 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-t72qw/must-gather-k4bnj"] Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.372004 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-t72qw_must-gather-k4bnj_0f1728cc-efe7-4b77-9fa5-604056334a1f/copy/0.log" Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.372607 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-t72qw/must-gather-k4bnj" Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.478062 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbv29\" (UniqueName: \"kubernetes.io/projected/0f1728cc-efe7-4b77-9fa5-604056334a1f-kube-api-access-vbv29\") pod \"0f1728cc-efe7-4b77-9fa5-604056334a1f\" (UID: \"0f1728cc-efe7-4b77-9fa5-604056334a1f\") " Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.478379 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0f1728cc-efe7-4b77-9fa5-604056334a1f-must-gather-output\") pod \"0f1728cc-efe7-4b77-9fa5-604056334a1f\" (UID: \"0f1728cc-efe7-4b77-9fa5-604056334a1f\") " Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.484052 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f1728cc-efe7-4b77-9fa5-604056334a1f-kube-api-access-vbv29" (OuterVolumeSpecName: "kube-api-access-vbv29") pod "0f1728cc-efe7-4b77-9fa5-604056334a1f" (UID: "0f1728cc-efe7-4b77-9fa5-604056334a1f"). InnerVolumeSpecName "kube-api-access-vbv29". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.580729 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbv29\" (UniqueName: \"kubernetes.io/projected/0f1728cc-efe7-4b77-9fa5-604056334a1f-kube-api-access-vbv29\") on node \"crc\" DevicePath \"\"" Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.651592 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f1728cc-efe7-4b77-9fa5-604056334a1f-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "0f1728cc-efe7-4b77-9fa5-604056334a1f" (UID: "0f1728cc-efe7-4b77-9fa5-604056334a1f"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.683087 4691 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0f1728cc-efe7-4b77-9fa5-604056334a1f-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.774313 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f1728cc-efe7-4b77-9fa5-604056334a1f" path="/var/lib/kubelet/pods/0f1728cc-efe7-4b77-9fa5-604056334a1f/volumes" Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.943532 4691 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-t72qw_must-gather-k4bnj_0f1728cc-efe7-4b77-9fa5-604056334a1f/copy/0.log" Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.944113 4691 generic.go:334] "Generic (PLEG): container finished" podID="0f1728cc-efe7-4b77-9fa5-604056334a1f" containerID="b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d" exitCode=143 Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.944177 4691 scope.go:117] "RemoveContainer" containerID="b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d" Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.944187 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-t72qw/must-gather-k4bnj" Nov 24 09:38:52 crc kubenswrapper[4691]: I1124 09:38:52.967846 4691 scope.go:117] "RemoveContainer" containerID="0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5" Nov 24 09:38:53 crc kubenswrapper[4691]: I1124 09:38:53.043044 4691 scope.go:117] "RemoveContainer" containerID="b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d" Nov 24 09:38:53 crc kubenswrapper[4691]: E1124 09:38:53.045124 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d\": container with ID starting with b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d not found: ID does not exist" containerID="b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d" Nov 24 09:38:53 crc kubenswrapper[4691]: I1124 09:38:53.045162 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d"} err="failed to get container status \"b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d\": rpc error: code = NotFound desc = could not find container \"b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d\": container with ID starting with b1dbe6d19ef8767cc919b3e5ad5e4ab450f50dec394c648e0b6f7568fdb0995d not found: ID does not exist" Nov 24 09:38:53 crc kubenswrapper[4691]: I1124 09:38:53.045188 4691 scope.go:117] "RemoveContainer" containerID="0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5" Nov 24 09:38:53 crc kubenswrapper[4691]: E1124 09:38:53.045978 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5\": container with ID starting with 0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5 not found: ID does not exist" containerID="0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5" Nov 24 09:38:53 crc kubenswrapper[4691]: I1124 09:38:53.046005 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5"} err="failed to get container status \"0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5\": rpc error: code = NotFound desc = could not find container \"0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5\": container with ID starting with 0839b9a60de1b0df703e9b3cf4dd93d84ff00c62ec535d49fe38bfb2c84369a5 not found: ID does not exist" Nov 24 09:39:51 crc kubenswrapper[4691]: I1124 09:39:51.089612 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:39:51 crc kubenswrapper[4691]: I1124 09:39:51.090149 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:40:21 crc kubenswrapper[4691]: I1124 09:40:21.089213 4691 
patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:40:21 crc kubenswrapper[4691]: I1124 09:40:21.089756 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:40:25 crc kubenswrapper[4691]: I1124 09:40:25.863950 4691 scope.go:117] "RemoveContainer" containerID="dd8a1a9a463b680fc2fbf6dd88b9e192020d7aad75d5c6eaca0b44be55d58385" Nov 24 09:40:25 crc kubenswrapper[4691]: I1124 09:40:25.897883 4691 scope.go:117] "RemoveContainer" containerID="9f9a9e428205204b015bcb6a94a9035d1a488f894e7e0fda7c544ed2459ec4b7" Nov 24 09:40:51 crc kubenswrapper[4691]: I1124 09:40:51.089672 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:40:51 crc kubenswrapper[4691]: I1124 09:40:51.090238 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 24 09:40:51 crc kubenswrapper[4691]: I1124 09:40:51.090304 4691 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" Nov 24 09:40:51 crc kubenswrapper[4691]: I1124 09:40:51.091381 4691 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3d4d370f4b40a169fa9cd55b58a6a65b5bfb32180c07eaee9baf9b0b5956f84f"} pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 24 09:40:51 crc kubenswrapper[4691]: I1124 09:40:51.091490 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" containerID="cri-o://3d4d370f4b40a169fa9cd55b58a6a65b5bfb32180c07eaee9baf9b0b5956f84f" gracePeriod=600 Nov 24 09:40:52 crc kubenswrapper[4691]: I1124 09:40:52.039487 4691 generic.go:334] "Generic (PLEG): container finished" podID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerID="3d4d370f4b40a169fa9cd55b58a6a65b5bfb32180c07eaee9baf9b0b5956f84f" exitCode=0 Nov 24 09:40:52 crc kubenswrapper[4691]: I1124 09:40:52.039589 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerDied","Data":"3d4d370f4b40a169fa9cd55b58a6a65b5bfb32180c07eaee9baf9b0b5956f84f"} Nov 24 09:40:52 crc kubenswrapper[4691]: I1124 09:40:52.039842 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" event={"ID":"54ccc455-9127-4afd-b3a4-7fc35181bf93","Type":"ContainerStarted","Data":"868fe175520e8a668dafb9bb7b74a029478609588152b4778b8078ef90196cb4"} Nov 24 09:40:52 crc kubenswrapper[4691]: I1124 09:40:52.039866 4691 scope.go:117] "RemoveContainer" containerID="9d5a7ae14d2062d9b51ce6e4e46f00f60586eef65cf9e6e4ed8bcf4d441bfe6c" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.917557 4691 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p256s"] Nov 24 09:41:44 crc kubenswrapper[4691]: E1124 09:41:44.918874 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85281ab7-1522-4837-9b58-5ed9e7603bcf" containerName="extract-content" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.918894 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="85281ab7-1522-4837-9b58-5ed9e7603bcf" containerName="extract-content" Nov 24 09:41:44 crc kubenswrapper[4691]: E1124 09:41:44.918950 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f1728cc-efe7-4b77-9fa5-604056334a1f" containerName="gather" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.918960 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f1728cc-efe7-4b77-9fa5-604056334a1f" containerName="gather" Nov 24 09:41:44 crc kubenswrapper[4691]: E1124 09:41:44.918976 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" containerName="extract-content" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.918984 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" containerName="extract-content" Nov 24 09:41:44 crc kubenswrapper[4691]: E1124 09:41:44.919000 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85281ab7-1522-4837-9b58-5ed9e7603bcf" containerName="registry-server" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.919008 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="85281ab7-1522-4837-9b58-5ed9e7603bcf" containerName="registry-server" Nov 24 09:41:44 crc kubenswrapper[4691]: E1124 09:41:44.919021 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" containerName="extract-utilities" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.919029 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" containerName="extract-utilities" Nov 24 09:41:44 crc kubenswrapper[4691]: E1124 09:41:44.919043 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" containerName="registry-server" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.919050 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" containerName="registry-server" Nov 24 09:41:44 crc kubenswrapper[4691]: E1124 09:41:44.919070 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85281ab7-1522-4837-9b58-5ed9e7603bcf" containerName="extract-utilities" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.919078 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="85281ab7-1522-4837-9b58-5ed9e7603bcf" containerName="extract-utilities" Nov 24 09:41:44 crc kubenswrapper[4691]: E1124 09:41:44.919095 4691 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f1728cc-efe7-4b77-9fa5-604056334a1f" containerName="copy" Nov 24 09:41:44 crc 
kubenswrapper[4691]: I1124 09:41:44.919102 4691 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f1728cc-efe7-4b77-9fa5-604056334a1f" containerName="copy" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.919317 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f1728cc-efe7-4b77-9fa5-604056334a1f" containerName="copy" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.919334 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d1a8ea7-d66d-42dd-9dbd-3f953cee51a6" containerName="registry-server" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.919363 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="85281ab7-1522-4837-9b58-5ed9e7603bcf" containerName="registry-server" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.919382 4691 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f1728cc-efe7-4b77-9fa5-604056334a1f" containerName="gather" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.921260 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.942384 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p256s"] Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.982599 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-catalog-content\") pod \"redhat-marketplace-p256s\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.982656 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-utilities\") pod \"redhat-marketplace-p256s\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:44 crc kubenswrapper[4691]: I1124 09:41:44.982693 4691 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqwfg\" (UniqueName: \"kubernetes.io/projected/86145f31-dc1d-4e65-aa3f-b11c83f23a29-kube-api-access-pqwfg\") pod \"redhat-marketplace-p256s\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:45 crc kubenswrapper[4691]: I1124 09:41:45.083291 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-utilities\") pod \"redhat-marketplace-p256s\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:45 crc kubenswrapper[4691]: I1124 09:41:45.083370 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqwfg\" (UniqueName: \"kubernetes.io/projected/86145f31-dc1d-4e65-aa3f-b11c83f23a29-kube-api-access-pqwfg\") pod \"redhat-marketplace-p256s\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:45 crc kubenswrapper[4691]: I1124 09:41:45.083559 4691 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-catalog-content\") pod \"redhat-marketplace-p256s\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:45 crc kubenswrapper[4691]: I1124 09:41:45.083826 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-utilities\") pod \"redhat-marketplace-p256s\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:45 crc kubenswrapper[4691]: I1124 09:41:45.084003 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-catalog-content\") pod \"redhat-marketplace-p256s\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:45 crc kubenswrapper[4691]: I1124 09:41:45.106224 4691 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqwfg\" (UniqueName: \"kubernetes.io/projected/86145f31-dc1d-4e65-aa3f-b11c83f23a29-kube-api-access-pqwfg\") pod \"redhat-marketplace-p256s\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:45 crc kubenswrapper[4691]: I1124 09:41:45.244906 4691 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:45 crc kubenswrapper[4691]: I1124 09:41:45.761060 4691 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p256s"] Nov 24 09:41:46 crc kubenswrapper[4691]: I1124 09:41:46.580789 4691 generic.go:334] "Generic (PLEG): container finished" podID="86145f31-dc1d-4e65-aa3f-b11c83f23a29" containerID="b44d046383f03822d8c97e0ff18e948312a60672df23f3edec8ac542dca27f25" exitCode=0 Nov 24 09:41:46 crc kubenswrapper[4691]: I1124 09:41:46.580921 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p256s" event={"ID":"86145f31-dc1d-4e65-aa3f-b11c83f23a29","Type":"ContainerDied","Data":"b44d046383f03822d8c97e0ff18e948312a60672df23f3edec8ac542dca27f25"} Nov 24 09:41:46 crc kubenswrapper[4691]: I1124 09:41:46.581230 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p256s" event={"ID":"86145f31-dc1d-4e65-aa3f-b11c83f23a29","Type":"ContainerStarted","Data":"40dc7bb61ec540a4ae7b887231cb9bb198d0cc92e14ac4358663c5c65d8bd342"} Nov 24 09:41:46 crc kubenswrapper[4691]: I1124 09:41:46.586928 4691 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 24 09:41:47 crc kubenswrapper[4691]: I1124 09:41:47.596090 4691 generic.go:334] "Generic (PLEG): container finished" podID="86145f31-dc1d-4e65-aa3f-b11c83f23a29" containerID="eec38ef8f905f1ed6060e3ab3547c5e6c5ac0a07232d1c57bc20be4f6bee8084" exitCode=0 Nov 24 09:41:47 crc kubenswrapper[4691]: I1124 09:41:47.596203 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p256s" event={"ID":"86145f31-dc1d-4e65-aa3f-b11c83f23a29","Type":"ContainerDied","Data":"eec38ef8f905f1ed6060e3ab3547c5e6c5ac0a07232d1c57bc20be4f6bee8084"} Nov 24 09:41:48 crc kubenswrapper[4691]: I1124 09:41:48.607163 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p256s" 
event={"ID":"86145f31-dc1d-4e65-aa3f-b11c83f23a29","Type":"ContainerStarted","Data":"4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b"} Nov 24 09:41:48 crc kubenswrapper[4691]: I1124 09:41:48.634174 4691 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p256s" podStartSLOduration=3.241926266 podStartE2EDuration="4.63414478s" podCreationTimestamp="2025-11-24 09:41:44 +0000 UTC" firstStartedPulling="2025-11-24 09:41:46.585870991 +0000 UTC m=+6268.584820250" lastFinishedPulling="2025-11-24 09:41:47.978089475 +0000 UTC m=+6269.977038764" observedRunningTime="2025-11-24 09:41:48.622882289 +0000 UTC m=+6270.621831548" watchObservedRunningTime="2025-11-24 09:41:48.63414478 +0000 UTC m=+6270.633094039" Nov 24 09:41:55 crc kubenswrapper[4691]: I1124 09:41:55.245965 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:55 crc kubenswrapper[4691]: I1124 09:41:55.246557 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:55 crc kubenswrapper[4691]: I1124 09:41:55.292935 4691 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:55 crc kubenswrapper[4691]: I1124 09:41:55.756249 4691 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:55 crc kubenswrapper[4691]: I1124 09:41:55.808555 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p256s"] Nov 24 09:41:57 crc kubenswrapper[4691]: I1124 09:41:57.696913 4691 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p256s" podUID="86145f31-dc1d-4e65-aa3f-b11c83f23a29" containerName="registry-server" containerID="cri-o://4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b" gracePeriod=2 Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.195669 4691 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.355626 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-catalog-content\") pod \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.355734 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-utilities\") pod \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.355906 4691 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqwfg\" (UniqueName: \"kubernetes.io/projected/86145f31-dc1d-4e65-aa3f-b11c83f23a29-kube-api-access-pqwfg\") pod \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\" (UID: \"86145f31-dc1d-4e65-aa3f-b11c83f23a29\") " Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.356786 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-utilities" (OuterVolumeSpecName: "utilities") pod "86145f31-dc1d-4e65-aa3f-b11c83f23a29" (UID: "86145f31-dc1d-4e65-aa3f-b11c83f23a29"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.362600 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86145f31-dc1d-4e65-aa3f-b11c83f23a29-kube-api-access-pqwfg" (OuterVolumeSpecName: "kube-api-access-pqwfg") pod "86145f31-dc1d-4e65-aa3f-b11c83f23a29" (UID: "86145f31-dc1d-4e65-aa3f-b11c83f23a29"). InnerVolumeSpecName "kube-api-access-pqwfg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.392573 4691 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "86145f31-dc1d-4e65-aa3f-b11c83f23a29" (UID: "86145f31-dc1d-4e65-aa3f-b11c83f23a29"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.458698 4691 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.459097 4691 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86145f31-dc1d-4e65-aa3f-b11c83f23a29-utilities\") on node \"crc\" DevicePath \"\"" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.459195 4691 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqwfg\" (UniqueName: \"kubernetes.io/projected/86145f31-dc1d-4e65-aa3f-b11c83f23a29-kube-api-access-pqwfg\") on node \"crc\" DevicePath \"\"" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.713300 4691 generic.go:334] "Generic (PLEG): container finished" podID="86145f31-dc1d-4e65-aa3f-b11c83f23a29" containerID="4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b" exitCode=0 Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.713392 4691 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p256s" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.713402 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p256s" event={"ID":"86145f31-dc1d-4e65-aa3f-b11c83f23a29","Type":"ContainerDied","Data":"4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b"} Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.714281 4691 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p256s" event={"ID":"86145f31-dc1d-4e65-aa3f-b11c83f23a29","Type":"ContainerDied","Data":"40dc7bb61ec540a4ae7b887231cb9bb198d0cc92e14ac4358663c5c65d8bd342"} Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.714317 4691 scope.go:117] "RemoveContainer" containerID="4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.746769 4691 scope.go:117] "RemoveContainer" containerID="eec38ef8f905f1ed6060e3ab3547c5e6c5ac0a07232d1c57bc20be4f6bee8084" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.782013 4691 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p256s"] Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.784053 4691 scope.go:117] "RemoveContainer" containerID="b44d046383f03822d8c97e0ff18e948312a60672df23f3edec8ac542dca27f25" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.790180 4691 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p256s"] Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.842330 4691 scope.go:117] "RemoveContainer" containerID="4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b" Nov 24 09:41:58 crc kubenswrapper[4691]: E1124 09:41:58.843266 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b\": container with ID starting with 4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b not found: ID does not exist" containerID="4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.843329 4691 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b"} err="failed to get container status \"4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b\": rpc error: code = NotFound desc = could not find container \"4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b\": container with ID starting with 4d0e39bbf2de49727a09696823b6d82f34b5c94ac8d59272cead7becd013dc3b not found: ID does not exist" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.843354 4691 scope.go:117] "RemoveContainer" containerID="eec38ef8f905f1ed6060e3ab3547c5e6c5ac0a07232d1c57bc20be4f6bee8084" Nov 24 09:41:58 crc kubenswrapper[4691]: E1124 09:41:58.846464 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eec38ef8f905f1ed6060e3ab3547c5e6c5ac0a07232d1c57bc20be4f6bee8084\": container with ID starting with eec38ef8f905f1ed6060e3ab3547c5e6c5ac0a07232d1c57bc20be4f6bee8084 not found: ID does not exist" containerID="eec38ef8f905f1ed6060e3ab3547c5e6c5ac0a07232d1c57bc20be4f6bee8084" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.846497 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eec38ef8f905f1ed6060e3ab3547c5e6c5ac0a07232d1c57bc20be4f6bee8084"} err="failed to get container status \"eec38ef8f905f1ed6060e3ab3547c5e6c5ac0a07232d1c57bc20be4f6bee8084\": rpc error: code = NotFound desc = could not find container \"eec38ef8f905f1ed6060e3ab3547c5e6c5ac0a07232d1c57bc20be4f6bee8084\": container with ID starting with eec38ef8f905f1ed6060e3ab3547c5e6c5ac0a07232d1c57bc20be4f6bee8084 not found: ID does not exist" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.846519 4691 scope.go:117] "RemoveContainer" containerID="b44d046383f03822d8c97e0ff18e948312a60672df23f3edec8ac542dca27f25" Nov 24 09:41:58 crc kubenswrapper[4691]: E1124 09:41:58.846768 4691 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b44d046383f03822d8c97e0ff18e948312a60672df23f3edec8ac542dca27f25\": container with ID starting with b44d046383f03822d8c97e0ff18e948312a60672df23f3edec8ac542dca27f25 not found: ID does not exist" containerID="b44d046383f03822d8c97e0ff18e948312a60672df23f3edec8ac542dca27f25" Nov 24 09:41:58 crc kubenswrapper[4691]: I1124 09:41:58.846795 4691 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b44d046383f03822d8c97e0ff18e948312a60672df23f3edec8ac542dca27f25"} err="failed to get container status \"b44d046383f03822d8c97e0ff18e948312a60672df23f3edec8ac542dca27f25\": rpc error: code = NotFound desc = could not find container \"b44d046383f03822d8c97e0ff18e948312a60672df23f3edec8ac542dca27f25\": container with ID starting with b44d046383f03822d8c97e0ff18e948312a60672df23f3edec8ac542dca27f25 not found: ID does not exist" Nov 24 09:42:00 crc kubenswrapper[4691]: I1124 09:42:00.773904 4691 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86145f31-dc1d-4e65-aa3f-b11c83f23a29" path="/var/lib/kubelet/pods/86145f31-dc1d-4e65-aa3f-b11c83f23a29/volumes" Nov 24 09:42:51 crc kubenswrapper[4691]: I1124 09:42:51.089120 4691 patch_prober.go:28] interesting pod/machine-config-daemon-fcwmc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 24 09:42:51 crc kubenswrapper[4691]: I1124 09:42:51.089845 4691 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-fcwmc" podUID="54ccc455-9127-4afd-b3a4-7fc35181bf93" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"